diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go new file mode 100644 index 00000000..f3bcb1f2 --- /dev/null +++ b/dialtesting/grpc.go @@ -0,0 +1,1022 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc. + +package dialtesting + +import ( + "bufio" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net" + "strings" + "text/template" + "time" + + pdesc "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/desc/protoparse" + "github.com/jhump/protoreflect/dynamic" + "github.com/jhump/protoreflect/dynamic/grpcdynamic" + "github.com/jhump/protoreflect/grpcreflect" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/metadata" +) + +var ( + _ TaskChild = (*GRPCTask)(nil) + _ ITask = (*GRPCTask)(nil) +) + +const ( + DefaultGRPCTimeout = 30 * time.Second + HealthCheckServiceName = "grpc.health.v1.Health" + HealthCheckMethodName = "Check" +) + +type GRPCOptCertificate struct { + IgnoreServerCertificateError bool `json:"ignore_server_certificate_error,omitempty"` + PrivateKey string `json:"private_key,omitempty"` + Certificate string `json:"certificate,omitempty"` + CaCert string `json:"ca,omitempty"` +} + +type GRPCSecret struct { + NoSaveResponseBody bool `json:"not_save,omitempty"` +} + +type GRPCSuccess struct { + Body []*SuccessOption `json:"body,omitempty"` + ResponseTime string `json:"response_time,omitempty"` + respTime time.Duration +} + +type GRPCProtoFilesDiscovery struct { + ProtoFiles map[string]string `json:"protofiles"` + FullMethod string `json:"full_method"` + JSONRequest string `json:"request,omitempty"` +} + +type GRPCReflectionDiscovery struct { + FullMethod string `json:"full_method"` + JSONRequest string `json:"request,omitempty"` +} + +type GRPCHealthCheckDiscovery struct { + Service string `json:"service,omitempty"` +} + +type GRPCOptRequest struct { + Metadata map[string]string `json:"metadata,omitempty"` + RequestTimeout string `json:"request_timeout,omitempty"` + ProtoFiles *GRPCProtoFilesDiscovery `json:"proto_files,omitempty"` + Reflection *GRPCReflectionDiscovery `json:"reflection,omitempty"` + HealthCheck *GRPCHealthCheckDiscovery `json:"health_check,omitempty"` +} + +type GRPCAdvanceOption struct { + RequestOptions *GRPCOptRequest `json:"request_options,omitempty"` + Certificate *GRPCOptCertificate `json:"certificate,omitempty"` + Secret *GRPCSecret `json:"secret,omitempty"` +} + +type GRPCTask struct { + *Task + Server string `json:"server"` + PostScript string `json:"post_script,omitempty"` + SuccessWhenLogic string `json:"success_when_logic"` + SuccessWhen []*GRPCSuccess `json:"success_when"` + AdvanceOptions *GRPCAdvanceOption `json:"advance_options,omitempty"` + + creds credentials.TransportCredentials + + result []byte + reqError string + reqCost time.Duration + timeout time.Duration + postScriptResult *ScriptResult + + rawTask *GRPCTask + healthMethodDescriptor *pdesc.MethodDescriptor // cached method descriptor for HealthCheck discovery + protoFilesMethodDescriptor *pdesc.MethodDescriptor // cached method descriptor for ProtoFiles discovery + reflectionMethodDescriptor *pdesc.MethodDescriptor // cached method descriptor for Reflection discovery +} + +func (t *GRPCTask) initTask() { + if 
t.Task == nil {
+		t.Task = &Task{}
+	}
+}
+
+func (t *GRPCTask) check() error {
+	if t.Server == "" {
+		return fmt.Errorf("server address is required")
+	}
+	if t.AdvanceOptions != nil &&
+		t.AdvanceOptions.RequestOptions != nil &&
+		t.AdvanceOptions.RequestOptions.ProtoFiles != nil &&
+		len(t.AdvanceOptions.RequestOptions.ProtoFiles.ProtoFiles) == 0 {
+		return fmt.Errorf("proto files not provided")
+	}
+	if t.getFullMethod() == "" {
+		return fmt.Errorf("full method is required")
+	}
+	if len(t.SuccessWhen) == 0 && t.PostScript == "" {
+		return fmt.Errorf("no check rule configured")
+	}
+
+	return nil
+}
+
+func (t *GRPCTask) init() error {
+	if t.AdvanceOptions == nil || t.AdvanceOptions.RequestOptions == nil {
+		return fmt.Errorf("advance options required")
+	}
+	opt := t.AdvanceOptions
+	reqOpt := opt.RequestOptions
+
+	t.timeout = DefaultGRPCTimeout
+	if reqOpt.RequestTimeout != "" {
+		timeout, err := time.ParseDuration(reqOpt.RequestTimeout)
+		if err != nil {
+			return fmt.Errorf("invalid timeout %q: %w", reqOpt.RequestTimeout, err)
+		}
+		t.timeout = timeout
+	}
+
+	// initialize success checkers
+	for _, checker := range t.SuccessWhen {
+		if checker == nil {
+			continue
+		}
+		if checker.ResponseTime != "" {
+			du, err := time.ParseDuration(checker.ResponseTime)
+			if err != nil {
+				return fmt.Errorf("invalid response time %q: %w", checker.ResponseTime, err)
+			}
+			checker.respTime = du
+		}
+
+		// body
+		for _, v := range checker.Body {
+			if v == nil {
+				continue
+			}
+			if err := genReg(v); err != nil {
+				return fmt.Errorf("compile regex failed: %w", err)
+			}
+		}
+	}
+
+	// set up transport credentials
+	var err error
+	t.creds, err = t.buildTLSCredentials()
+	if err != nil {
+		return fmt.Errorf("build TLS credentials failed: %w", err)
+	}
+
+	// cache the method descriptor if using ProtoFiles discovery
+	if reqOpt.ProtoFiles != nil && len(reqOpt.ProtoFiles.ProtoFiles) > 0 {
+		_, err := t.findMethodAmongProtofiles()
+		if err != nil {
+			return fmt.Errorf("find method descriptor failed: %w", err)
+		}
+	}
+
+	if reqOpt.HealthCheck != nil {
+		_, err := t.findHealthCheckMethod()
+		if err != nil {
+			return fmt.Errorf("find health check method failed: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (t *GRPCTask) buildTLSCredentials() (credentials.TransportCredentials, error) {
+	opt := t.AdvanceOptions
+	if opt == nil || opt.Certificate == nil {
+		return insecure.NewCredentials(), nil
+	}
+
+	cert := opt.Certificate
+
+	// if server certificate errors are ignored, skip verification entirely
+	if cert.IgnoreServerCertificateError {
+		config := &tls.Config{
+			InsecureSkipVerify: true, //nolint:gosec
+		}
+		return credentials.NewTLS(config), nil
+	}
+
+	// if a CA cert is provided, verify the server against it
+	if cert.CaCert != "" {
+		caCertPool := x509.NewCertPool()
+		if !caCertPool.AppendCertsFromPEM([]byte(cert.CaCert)) {
+			return nil, fmt.Errorf("failed to append CA certificate")
+		}
+
+		config := &tls.Config{
+			RootCAs:    caCertPool,
+			MinVersion: tls.VersionTLS12,
+		}
+
+		// if a client certificate and private key are also provided, add them for mTLS
+		if cert.Certificate != "" && cert.PrivateKey != "" {
+			clientCert, err := tls.X509KeyPair([]byte(cert.Certificate), []byte(cert.PrivateKey))
+			if err != nil {
+				return nil, fmt.Errorf("failed to load client certificate: %w", err)
+			}
+			config.Certificates = []tls.Certificate{clientCert}
+		}
+
+		return credentials.NewTLS(config), nil
+	}
+
+	return insecure.NewCredentials(), nil
+}
+
+func (t *GRPCTask) findMethod(ctx context.Context, conn *grpc.ClientConn) (*pdesc.MethodDescriptor, error) {
+	opt := 
t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil { + return nil, fmt.Errorf("request options required") + } + + reqOpt := opt.RequestOptions + + if reqOpt.ProtoFiles != nil { + if len(reqOpt.ProtoFiles.ProtoFiles) == 0 { + return nil, fmt.Errorf("proto files not provided") + } + return t.findMethodAmongProtofiles() + } + + if reqOpt.Reflection != nil { + return t.findMethodByReflection(ctx, conn) + } + + if reqOpt.HealthCheck != nil { + return t.findHealthCheckMethod() + } + + return nil, fmt.Errorf("no discovery method configured (proto_files, reflection, or health_check)") +} + +func (t *GRPCTask) findHealthCheckMethod() (*pdesc.MethodDescriptor, error) { + if t.healthMethodDescriptor != nil { + return t.healthMethodDescriptor, nil + } + + healthFD := grpc_health_v1.File_grpc_health_v1_health_proto + if healthFD == nil { + return nil, fmt.Errorf("health check file descriptor not available") + } + + fd, err := pdesc.WrapFile(healthFD) + if err != nil { + return nil, fmt.Errorf("wrap health check file descriptor failed: %w", err) + } + + sd := fd.FindService(HealthCheckServiceName) + if sd == nil { + return nil, fmt.Errorf("health check service %s not found", HealthCheckServiceName) + } + + md := sd.FindMethodByName(HealthCheckMethodName) + if md == nil { + return nil, fmt.Errorf("health check method %s not found", HealthCheckMethodName) + } + t.healthMethodDescriptor = md + + return md, nil +} + +func (t *GRPCTask) findMethodByReflection(ctx context.Context, conn *grpc.ClientConn) (*pdesc.MethodDescriptor, error) { + if t.reflectionMethodDescriptor != nil { + return t.reflectionMethodDescriptor, nil + } + + opt := t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil || opt.RequestOptions.Reflection == nil { + return nil, fmt.Errorf("reflection discovery not configured") + } + + fullMethod := opt.RequestOptions.Reflection.FullMethod + if fullMethod == "" { + return nil, fmt.Errorf("full method is required for reflection discovery") + } + fullMethod = strings.TrimPrefix(fullMethod, "/") + + rc := grpcreflect.NewClientAuto(ctx, conn) + defer rc.Reset() + + slash := strings.LastIndex(fullMethod, "/") + if slash == -1 { + return nil, fmt.Errorf("invalid full method name: %s", fullMethod) + } + serviceName := fullMethod[:slash] + + fd, err := rc.FileContainingSymbol(serviceName) + if err != nil { + return nil, err + } + + sd := fd.FindService(serviceName) + if sd == nil { + return nil, fmt.Errorf("service %s not found", serviceName) + } + + methodName := fullMethod[slash+1:] + md := sd.FindMethodByName(methodName) + if md == nil { + return nil, fmt.Errorf("method %s not found in service %s", methodName, serviceName) + } + t.reflectionMethodDescriptor = md + return md, nil +} + +func (t *GRPCTask) findMethodAmongProtofiles() (*pdesc.MethodDescriptor, error) { + // Return cached method descriptor if available + if t.protoFilesMethodDescriptor != nil { + return t.protoFilesMethodDescriptor, nil + } + + opt := t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil || opt.RequestOptions.ProtoFiles == nil { + return nil, fmt.Errorf("proto files discovery not configured") + } + + protoFiles := opt.RequestOptions.ProtoFiles.ProtoFiles + fullMethod := opt.RequestOptions.ProtoFiles.FullMethod + + if len(protoFiles) == 0 { + return nil, fmt.Errorf("proto files not provided") + } + if fullMethod == "" { + return nil, fmt.Errorf("full method is required for proto files discovery") + } + fullMethod = strings.TrimPrefix(fullMethod, "/") + + extendedMap, err := 
buildExtendedProtoMap(protoFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	p := protoparse.Parser{
+		Accessor:         protoparse.FileContentsFromMap(extendedMap),
+		InferImportPaths: true,
+	}
+
+	desc, err := p.ParseFiles(getFileNames(protoFiles)...)
+	if err != nil {
+		return nil, fmt.Errorf("parse proto files failed: %w", err)
+	}
+
+	sepIdx := strings.LastIndex(fullMethod, "/")
+	if sepIdx == -1 {
+		return nil, fmt.Errorf("invalid fullMethod: %q", fullMethod)
+	}
+
+	service := fullMethod[:sepIdx]
+	method := fullMethod[sepIdx+1:]
+	for _, fd := range desc {
+		if sd := fd.FindService(service); sd != nil {
+			if md := sd.FindMethodByName(method); md != nil {
+				t.protoFilesMethodDescriptor = md
+				return md, nil
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("method %s not found in service %s", method, service)
+}
+
+func buildExtendedProtoMap(protoFiles map[string]string) (map[string]string, error) {
+	extendedMap := make(map[string]string, len(protoFiles))
+	for k, v := range protoFiles {
+		extendedMap[k] = v
+	}
+	var missingImports []string
+	for _, content := range protoFiles {
+		for _, imp := range extractImports(content) {
+			if _, ok := extendedMap[imp]; !ok {
+				missingImports = append(missingImports, imp)
+			}
+		}
+	}
+	if len(missingImports) > 0 {
+		return nil, fmt.Errorf("missing imports: %s", strings.Join(missingImports, ", "))
+	}
+	return extendedMap, nil
+}
+
+// extractImports extracts all import statements from proto file content.
+func extractImports(content string) []string {
+	var imports []string
+	scanner := bufio.NewScanner(strings.NewReader(content))
+
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		// remove inline comments (content after //)
+		if commentIdx := strings.Index(line, "//"); commentIdx != -1 {
+			line = line[:commentIdx]
+		}
+
+		line = strings.TrimSpace(line)
+
+		// skip empty lines and comment lines
+		if line == "" || strings.HasPrefix(line, "//") {
+			continue
+		}
+
+		// only import statements are of interest
+		if !strings.HasPrefix(line, "import ") {
+			continue
+		}
+
+		// strip the "import" keyword, surrounding whitespace and the trailing semicolon
+		line = strings.TrimPrefix(line, "import")
+		line = strings.TrimSpace(line)
+		line = strings.TrimSuffix(line, ";")
+		line = strings.TrimSpace(line)
+
+		// extract the quoted import path
+		if strings.HasPrefix(line, `"`) && strings.HasSuffix(line, `"`) {
+			importPath := line[1 : len(line)-1]
+			if importPath != "" {
+				imports = append(imports, importPath)
+			}
+		} else if start := strings.Index(line, `"`); start != -1 {
+			// handle the case where the quotes are not at the beginning
+			end := strings.LastIndex(line, `"`)
+			if end > start {
+				importPath := line[start+1 : end]
+				if importPath != "" {
+					imports = append(imports, importPath)
+				}
+			}
+		}
+	}
+
+	return imports
+}
+
+func getFileNames(files map[string]string) []string {
+	arr := make([]string, 0, len(files))
+	for k := range files {
+		arr = append(arr, k)
+	}
+	return arr
+}
+
+func (t *GRPCTask) getFullMethod() string {
+	if t.AdvanceOptions == nil || t.AdvanceOptions.RequestOptions == nil {
+		return ""
+	}
+	reqOpt := t.AdvanceOptions.RequestOptions
+	if reqOpt.ProtoFiles != nil {
+		return reqOpt.ProtoFiles.FullMethod
+	}
+	if reqOpt.Reflection != nil {
+		return reqOpt.Reflection.FullMethod
+	}
+	if reqOpt.HealthCheck != nil {
+		return fmt.Sprintf("%s/%s", HealthCheckServiceName, HealthCheckMethodName)
+	}
+	return ""
+}
+
+func (t *GRPCTask) getJSONRequest() string {
+	if t.AdvanceOptions == nil || t.AdvanceOptions.RequestOptions == nil {
+		
return "" + } + reqOpt := t.AdvanceOptions.RequestOptions + if reqOpt.ProtoFiles != nil { + return reqOpt.ProtoFiles.JSONRequest + } + if reqOpt.Reflection != nil { + return reqOpt.Reflection.JSONRequest + } + if reqOpt.HealthCheck != nil && reqOpt.HealthCheck.Service != "" { + healthReq := map[string]string{"service": reqOpt.HealthCheck.Service} + jsonReq, _ := json.Marshal(healthReq) + return string(jsonReq) + } + return "" +} + +func (t *GRPCTask) run() error { + opt := t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil { + t.reqError = "request options required" + return nil + } + + // Create connection (new connection for each run()) + ctx, cancel := context.WithTimeout(context.Background(), t.timeout) + defer cancel() + + dialOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(t.creds), + grpc.WithBlock(), + } + + conn, err := grpc.DialContext(ctx, t.Server, dialOpts...) + if err != nil { + t.reqError = fmt.Sprintf("dial grpc server failed: %v", err) + return nil + } + defer func() { + _ = conn.Close() + }() + + // Find method + method, err := t.findMethod(ctx, conn) + if err != nil { + t.reqError = err.Error() + return nil + } + + reqOpt := opt.RequestOptions + + // Add metadata + if len(reqOpt.Metadata) > 0 { + md := metadata.New(reqOpt.Metadata) + ctx = metadata.NewOutgoingContext(ctx, md) + } + + // Build request message + msg := dynamic.NewMessage(method.GetInputType()) + + jsonRequest := t.getJSONRequest() + if jsonRequest != "" { + if err := msg.UnmarshalJSON([]byte(jsonRequest)); err != nil { + t.reqError = fmt.Sprintf("invalid message: %v", err) + return nil + } + } + + // Execute RPC call + rpcStart := time.Now() + stub := grpcdynamic.NewStub(conn) + resp, err := stub.InvokeRpc(ctx, method, msg) + t.reqCost = time.Since(rpcStart) + if err != nil { + t.reqError = err.Error() + return nil + } + + // dial test message + dynMsg, ok := resp.(*dynamic.Message) + if !ok { + t.reqError = fmt.Sprintf("unexpected response type: expected *dynamic.Message, got %T", resp) + return nil + } + + j, err := dynMsg.MarshalJSON() + if err != nil { + t.reqError = fmt.Sprintf("marshal response failed: %v", err) + return nil + } + t.result = j + + // run post script if provided + if t.PostScript != "" { + result, err := postScriptDoGRPC(t.PostScript, t.result) + if err != nil { + t.reqError = err.Error() + return nil + } + t.postScriptResult = result + } + + return nil +} + +func (t *GRPCTask) stop() { + // close connection in run() +} + +func (t *GRPCTask) clear() { + t.result = nil + t.reqError = "" + t.reqCost = 0 + t.postScriptResult = nil + if t.timeout == 0 { + t.timeout = DefaultGRPCTimeout + } +} + +func (t *GRPCTask) class() string { + return ClassGRPC +} + +func (t *GRPCTask) metricName() string { + return "grpc_dial_testing" +} + +func (t *GRPCTask) checkResult() ([]string, bool) { + var reasons []string + var succFlag bool + + if t.reqError != "" { + return []string{t.reqError}, false + } + if t.result == nil { + return []string{"no response"}, false + } + + // if no success conditions defined, default to success if no error + if len(t.SuccessWhen) == 0 && t.PostScript == "" { + return nil, true + } + + // check SuccessWhen conditions + for _, chk := range t.SuccessWhen { + if chk == nil { + continue + } + // check body + for _, v := range chk.Body { + if v == nil { + continue + } + if err := v.check(string(t.result), "response body"); err != nil { + reasons = append(reasons, err.Error()) + } else { + succFlag = true + } + } + + // check response time + if 
chk.respTime > 0 && t.reqCost > chk.respTime { + reasons = append(reasons, + fmt.Sprintf("gRPC response time(%v) larger than %v", t.reqCost, chk.respTime)) + } else if chk.respTime > 0 { + succFlag = true + } + } + + // check post script result + if t.postScriptResult != nil { + if t.postScriptResult.Result.IsFailed { + reasons = append(reasons, t.postScriptResult.Result.ErrorMessage) + } else { + succFlag = true + } + } + + return reasons, succFlag +} + +func (t *GRPCTask) getResults() (tags map[string]string, fields map[string]interface{}) { + tags = map[string]string{ + "name": t.Name, + "server": t.Server, + "method": t.getFullMethod(), + "status": "FAIL", + "proto": "grpc", + } + + fields = map[string]interface{}{ + "response_time": int64(t.reqCost) / 1000, + "success": int64(-1), + } + + if hostnames, err := t.getHostName(); err == nil && len(hostnames) > 0 { + tags["dest_host"] = hostnames[0] + } + + for k, v := range t.Tags { + tags[k] = v + } + + message := map[string]interface{}{} + + reasons, succFlag := t.checkResult() + + // check if we should save response body + notSave := false + if t.AdvanceOptions != nil && t.AdvanceOptions.Secret != nil && t.AdvanceOptions.Secret.NoSaveResponseBody { + notSave = true + } + + // apply SuccessWhenLogic + switch t.SuccessWhenLogic { + case "or": + if succFlag && t.reqError == "" { + tags["status"] = "OK" + fields["success"] = int64(1) + } else { + message["fail_reason"] = strings.Join(reasons, ";") + fields["fail_reason"] = strings.Join(reasons, ";") + } + default: // "and" or empty (default to "and") + if succFlag && len(reasons) == 0 && t.reqError == "" { + tags["status"] = "OK" + fields["success"] = int64(1) + } else { + message["fail_reason"] = strings.Join(reasons, ";") + fields["fail_reason"] = strings.Join(reasons, ";") + } + } + + message["response_time"] = int64(t.reqCost) / 1000 + if t.result != nil && !notSave { + message["response"] = string(t.result) + } + + data, err := json.Marshal(message) + if err != nil { + fields["message"] = err.Error() + } else { + if len(data) > MaxMsgSize { + fields["message"] = string(data[:MaxMsgSize]) + } else { + fields["message"] = string(data) + } + } + + return tags, fields +} + +func (t *GRPCTask) getVariableValue(variable Variable) (string, error) { + if variable.PostScript == "" && t.PostScript == "" { + return "", fmt.Errorf("post_script is empty") + } + + if variable.TaskVarName == "" { + return "", fmt.Errorf("task variable name is empty") + } + + if t.result == nil { + return "", fmt.Errorf("response body is empty") + } + + var result *ScriptResult + var err error + if variable.PostScript == "" { // use task post script + result = t.postScriptResult + } else { // use task variable post script + if result, err = postScriptDoGRPC(variable.PostScript, t.result); err != nil { + return "", fmt.Errorf("run pipeline failed: %w", err) + } + } + + if result == nil { + return "", fmt.Errorf("pipeline result is empty") + } + + value, ok := result.Vars[variable.TaskVarName] + if !ok { + return "", fmt.Errorf("task variable name not found") + } + return fmt.Sprintf("%v", value), nil +} + +func (t *GRPCTask) getHostName() ([]string, error) { + if t.Server == "" { + return nil, fmt.Errorf("server address is empty") + } + + host, _, err := net.SplitHostPort(t.Server) + if err == nil { + return []string{host}, nil + } + + return []string{t.Server}, nil +} + +func (t *GRPCTask) getRawTask(taskString string) (string, error) { + task := GRPCTask{} + + if err := json.Unmarshal([]byte(taskString), &task); err 
!= nil { + return "", fmt.Errorf("unmarshal grpc task failed: %w", err) + } + + task.Task = nil + + bytes, err := json.Marshal(task) + if err != nil { + return "", fmt.Errorf("marshal grpc task failed: %w", err) + } + return string(bytes), nil +} + +func (t *GRPCTask) renderSuccessWhen(task *GRPCTask, fm template.FuncMap) error { + if task == nil { + return nil + } + + if task.SuccessWhen != nil { + for index, checker := range task.SuccessWhen { + if checker == nil { + continue + } + + // body + if checker.Body != nil { + for bodyIndex, v := range checker.Body { + if v != nil { + if err := t.renderSuccessOption(v, t.SuccessWhen[index].Body[bodyIndex], fm); err != nil { + return fmt.Errorf("render body failed: %w", err) + } + } + } + } + + // response time + if checker.ResponseTime != "" { + responseTime, err := t.GetParsedString(checker.ResponseTime, fm) + if err != nil { + return fmt.Errorf("render response time failed: %w", err) + } + t.SuccessWhen[index].ResponseTime = responseTime + } + } + } + + return nil +} + +func (t *GRPCTask) setReqError(err string) { + t.reqError = err +} + +func (t *GRPCTask) renderTemplate(fm template.FuncMap) error { + if t.rawTask == nil { + task := &GRPCTask{} + if err := t.NewRawTask(task); err != nil { + return fmt.Errorf("new raw task failed: %w", err) + } + t.rawTask = task + } + + task := t.rawTask + if task == nil { + return errors.New("raw task is nil") + } + + // server + server, err := t.GetParsedString(task.Server, fm) + if err != nil { + return fmt.Errorf("render server failed: %w", err) + } + t.Server = server + + // success when + if err := t.renderSuccessWhen(task, fm); err != nil { + return fmt.Errorf("render success when failed: %w", err) + } + + // advance options + if err := t.renderAdvanceOptions(task, fm); err != nil { + return fmt.Errorf("render advance options failed: %w", err) + } + + return nil +} + +func (t *GRPCTask) renderAdvanceOptions(task *GRPCTask, fm template.FuncMap) error { + if task == nil || task.AdvanceOptions == nil { + return nil + } + + // request options + if err := t.renderRequestOptions(task.AdvanceOptions.RequestOptions, fm); err != nil { + return fmt.Errorf("render request options failed: %w", err) + } + + return nil +} + +func (t *GRPCTask) renderRequestOptions(requestOpt *GRPCOptRequest, fm template.FuncMap) error { + if requestOpt == nil { + return nil + } + + // request timeout + if requestOpt.RequestTimeout != "" { + timeout, err := t.GetParsedString(requestOpt.RequestTimeout, fm) + if err != nil { + return fmt.Errorf("render timeout failed: %w", err) + } + t.AdvanceOptions.RequestOptions.RequestTimeout = timeout + } + + // metadata + if len(requestOpt.Metadata) > 0 { + for k, v := range requestOpt.Metadata { + key, err := t.GetParsedString(k, fm) + if err != nil { + return fmt.Errorf("render metadata key %q failed: %w", k, err) + } + value, err := t.GetParsedString(v, fm) + if err != nil { + return fmt.Errorf("render metadata value for key %q failed: %w", k, err) + } + delete(t.AdvanceOptions.RequestOptions.Metadata, k) + t.AdvanceOptions.RequestOptions.Metadata[key] = value + } + } + + // proto files discovery + if err := t.renderProtoFiles(requestOpt.ProtoFiles, fm); err != nil { + return fmt.Errorf("render proto files failed: %w", err) + } + + // reflection discovery + if err := t.renderReflection(requestOpt.Reflection, fm); err != nil { + return fmt.Errorf("render reflection failed: %w", err) + } + + // health check discovery + if err := t.renderHealthCheck(requestOpt.HealthCheck, fm); err != nil { + 
return fmt.Errorf("render health check failed: %w", err) + } + + return nil +} + +func (t *GRPCTask) renderProtoFiles(protoFiles *GRPCProtoFilesDiscovery, fm template.FuncMap) error { + if protoFiles == nil { + return nil + } + + if protoFiles.FullMethod != "" { + fullMethod, err := t.GetParsedString(protoFiles.FullMethod, fm) + if err != nil { + return fmt.Errorf("render proto files full method failed: %w", err) + } + // if full method is changed, clear the cached method descriptor + if t.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod != fullMethod { + t.protoFilesMethodDescriptor = nil + } + t.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod = fullMethod + } + + if protoFiles.JSONRequest != "" { + jsonRequest, err := t.GetParsedString(protoFiles.JSONRequest, fm) + if err != nil { + return fmt.Errorf("render proto files JSON request failed: %w", err) + } + t.AdvanceOptions.RequestOptions.ProtoFiles.JSONRequest = jsonRequest + } + + return nil +} + +func (t *GRPCTask) renderReflection(reflection *GRPCReflectionDiscovery, fm template.FuncMap) error { + if reflection == nil { + return nil + } + + if reflection.FullMethod != "" { + fullMethod, err := t.GetParsedString(reflection.FullMethod, fm) + if err != nil { + return fmt.Errorf("render reflection full method failed: %w", err) + } + // if full method is changed, clear the cached method descriptor + if t.AdvanceOptions.RequestOptions.Reflection.FullMethod != fullMethod { + t.reflectionMethodDescriptor = nil + } + t.AdvanceOptions.RequestOptions.Reflection.FullMethod = fullMethod + } + + if reflection.JSONRequest != "" { + jsonRequest, err := t.GetParsedString(reflection.JSONRequest, fm) + if err != nil { + return fmt.Errorf("render reflection JSON request failed: %w", err) + } + t.AdvanceOptions.RequestOptions.Reflection.JSONRequest = jsonRequest + } + + return nil +} + +func (t *GRPCTask) renderHealthCheck(healthCheck *GRPCHealthCheckDiscovery, fm template.FuncMap) error { + if healthCheck == nil { + return nil + } + + if healthCheck.Service != "" { + service, err := t.GetParsedString(healthCheck.Service, fm) + if err != nil { + return fmt.Errorf("render health check service failed: %w", err) + } + t.AdvanceOptions.RequestOptions.HealthCheck.Service = service + } + + return nil +} diff --git a/dialtesting/grpc_script.go b/dialtesting/grpc_script.go new file mode 100644 index 00000000..92ee2377 --- /dev/null +++ b/dialtesting/grpc_script.go @@ -0,0 +1,140 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc. + +package dialtesting + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/GuanceCloud/cliutils/point" + "github.com/GuanceCloud/pipeline-go/lang" + "github.com/GuanceCloud/pipeline-go/lang/platypus" + "github.com/GuanceCloud/pipeline-go/ptinput" +) + +type ScriptGRPCRequestResponse struct { + Body string `json:"body"` +} + +func (h *ScriptGRPCRequestResponse) String() (string, error) { + bytes, err := json.Marshal(h) + if err != nil { + return "", fmt.Errorf("response marshal failed: %w", err) + } + return string(bytes), nil +} + +type ScriptGRPCMessage struct { + Response *ScriptGRPCRequestResponse `json:"response"` + Vars *Vars `json:"vars"` +} + +// postScriptDoGRPC run pipeline script for gRPC response and return result. +// +// bodyBytes is the JSON body of the gRPC response. 
+func postScriptDoGRPC(script string, bodyBytes []byte) (*ScriptResult, error) {
+	if script == "" || bodyBytes == nil {
+		return &ScriptResult{}, nil
+	}
+
+	response := &ScriptGRPCRequestResponse{
+		Body: string(bodyBytes),
+	}
+
+	result, err := runPipelineGRPC(script, response, nil)
+	if err != nil {
+		return nil, fmt.Errorf("run pipeline failed: %w", err)
+	}
+	return result, nil
+}
+
+func runPipelineGRPC(script string, response *ScriptGRPCRequestResponse, vars *Vars) (*ScriptResult, error) {
+	scriptName := "script"
+
+	script = fmt.Sprintf(`
+	content = load_json(_)
+	response = content["response"]
+	vars = content["vars"]
+	result = {}
+
+	%s
+
+	add_key(result, result)
+	add_key(vars, vars)
+	`, script)
+
+	pls, errs := platypus.NewScripts(
+		map[string]string{scriptName: script},
+		lang.WithCat(point.Logging),
+	)
+
+	defer func() {
+		for _, pl := range pls {
+			pl.Cleanup()
+		}
+	}()
+
+	for k, v := range errs {
+		return nil, fmt.Errorf("new scripts failed: %s, %w", k, v)
+	}
+
+	pl, ok := pls[scriptName]
+	if !ok {
+		return nil, fmt.Errorf("script %s not found", scriptName)
+	}
+
+	if vars == nil {
+		vars = &Vars{}
+	}
+
+	message := &ScriptGRPCMessage{
+		Response: response,
+		Vars:     vars,
+	}
+
+	messageBytes, err := json.Marshal(message)
+	if err != nil {
+		return nil, fmt.Errorf("message marshal failed: %w", err)
+	}
+	messageString := string(messageBytes)
+
+	fields := map[string]interface{}{
+		"message": messageString,
+	}
+
+	pt := ptinput.NewPlPoint(point.Logging, "test", nil, fields, time.Now())
+
+	if err := pl.Run(pt, nil, nil); err != nil {
+		return nil, fmt.Errorf("run failed: %w", err)
+	}
+
+	resultFields := pt.Fields()
+
+	result := ScriptHTTPResult{}
+
+	if val, ok := resultFields["result"]; !ok {
+		return nil, fmt.Errorf("result not found")
+	} else if err := json.Unmarshal([]byte(getFiledString(val)), &result); err != nil {
+		return nil, fmt.Errorf("unmarshal result failed: %w", err)
+	}
+
+	if val, ok := resultFields["vars"]; !ok {
+		return nil, fmt.Errorf("vars not found")
+	} else if err := json.Unmarshal([]byte(getFiledString(val)), &vars); err != nil {
+		return nil, fmt.Errorf("unmarshal vars failed: %w", err)
+	}
+
+	// limit error message length
+	if len(result.ErrorMessage) > MaxErrorMessageSize {
+		result.ErrorMessage = result.ErrorMessage[:MaxErrorMessageSize] + "..."
+	}
+
+	return &ScriptResult{
+		Result: result,
+		Vars:   *vars,
+	}, nil
+}
diff --git a/dialtesting/grpc_script_test.go b/dialtesting/grpc_script_test.go
new file mode 100644
index 00000000..9e4bf26b
--- /dev/null
+++ b/dialtesting/grpc_script_test.go
@@ -0,0 +1,141 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the MIT License.
+// This product includes software developed at Guance Cloud (https://www.guance.com/).
+// Copyright 2021-present Guance, Inc.
+
+package dialtesting
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestPostScriptDoGRPC(t *testing.T) {
+	t.Run("success - extract message field", func(t *testing.T) {
+		script := `
+body = load_json(response["body"])
+vars["message"] = body["message"]
+result["is_failed"] = false
+	`
+
+		body := []byte(`{"message":"你好, test! 这是来自 gRPC 的问候"}`)
+
+		result, err := postScriptDoGRPC(script, body)
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.False(t, result.Result.IsFailed)
+		assert.Equal(t, "你好, test! 这是来自 gRPC 的问候", result.Vars["message"])
+	})
+
+	t.Run("success - extract multiple fields", func(t *testing.T) {
+		script := `
+body = load_json(response["body"])
+vars["message"] = body["message"]
+vars["status"] = body["status"]
+result["is_failed"] = false
+	`
+
+		body := []byte(`{"message":"hello","status":"ok"}`)
+
+		result, err := postScriptDoGRPC(script, body)
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.False(t, result.Result.IsFailed)
+		assert.Equal(t, "hello", result.Vars["message"])
+		assert.Equal(t, "ok", result.Vars["status"])
+	})
+
+	t.Run("failure - missing required field", func(t *testing.T) {
+		script := `
+body = load_json(response["body"])
+if body["message"] != nil {
+	vars["message"] = body["message"]
+	result["is_failed"] = false
+} else {
+	result["is_failed"] = true
+	result["error_message"] = "响应中缺少 message 字段"
+}
+	`
+
+		body := []byte(`{"status":"ok"}`)
+
+		result, err := postScriptDoGRPC(script, body)
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.True(t, result.Result.IsFailed)
+		assert.Equal(t, "响应中缺少 message 字段", result.Result.ErrorMessage)
+	})
+
+	t.Run("failure - custom error", func(t *testing.T) {
+		script := `
+result["is_failed"] = true
+result["error_message"] = "custom error message"
+	`
+
+		body := []byte(`{"message":"hello"}`)
+
+		result, err := postScriptDoGRPC(script, body)
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.True(t, result.Result.IsFailed)
+		assert.Equal(t, "custom error message", result.Result.ErrorMessage)
+	})
+
+	t.Run("empty script", func(t *testing.T) {
+		script := ""
+
+		body := []byte(`{"message":"hello"}`)
+
+		result, err := postScriptDoGRPC(script, body)
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+	})
+
+	t.Run("nil body", func(t *testing.T) {
+		script := `
+vars["test"] = "value"
+result["is_failed"] = false
+	`
+
+		result, err := postScriptDoGRPC(script, nil)
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+	})
+
+	t.Run("invalid JSON in response body", func(t *testing.T) {
+		script := `
+body = load_json(response["body"])
+if body != nil {
+	vars["message"] = body["message"]
+	result["is_failed"] = false
+} else {
+	result["is_failed"] = true
+	result["error_message"] = "invalid JSON"
+}
+	`
+
+		body := []byte(`invalid json`)
+
+		result, _ := postScriptDoGRPC(script, body)
+		assert.NotNil(t, result)
+	})
+
+	t.Run("complex nested JSON", func(t *testing.T) {
+		script := `
+body = load_json(response["body"])
+vars["user_name"] = body["user"]["name"]
+vars["user_age"] = body["user"]["age"]
+result["is_failed"] = false
+	`
+
+		body := []byte(`{"user":{"name":"test","age":25},"status":"ok"}`)
+
+		result, err := postScriptDoGRPC(script, body)
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.False(t, result.Result.IsFailed)
+		assert.Equal(t, "test", result.Vars["user_name"])
+		assert.Equal(t, float64(25), result.Vars["user_age"]) // JSON numbers decode as float64
+	})
+}
diff --git a/dialtesting/grpc_test.go b/dialtesting/grpc_test.go
new file mode 100644
index 00000000..725339b6
--- /dev/null
+++ b/dialtesting/grpc_test.go
@@ -0,0 +1,1333 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the MIT License.
+// This product includes software developed at Guance Cloud (https://www.guance.com/).
+// Copyright 2021-present Guance, Inc.
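+
+// NOTE: several tests in this file require a locally running gRPC test server
+// and are skipped by default via t.Skip.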
+
+package dialtesting
+
+import (
+	"encoding/json"
+	"os"
+	T "testing"
+	"text/template"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+//nolint:golint // Requires real gRPC server access
+func TestGRPCTask_Check(t *T.T) {
+	t.Run("missing server", func(t *T.T) {
+		task := &GRPCTask{
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					ProtoFiles: &GRPCProtoFilesDiscovery{
+						FullMethod: "greeter.Greeter/SayHello",
+						ProtoFiles: map[string]string{
+							"greeter.proto": "syntax = \"proto3\";",
+						},
+					},
+				},
+			},
+			SuccessWhen: []*GRPCSuccess{
+				{
+					Body: []*SuccessOption{
+						{Contains: "hello"},
+					},
+				},
+			},
+		}
+		task.initTask()
+		err := task.check()
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "server address is required")
+	})
+
+	t.Run("missing proto files", func(t *T.T) {
+		task := &GRPCTask{
+			Server: "localhost:50051",
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					ProtoFiles: &GRPCProtoFilesDiscovery{
+						FullMethod: "greeter.Greeter/SayHello",
+						ProtoFiles: map[string]string{}, // empty proto files
+					},
+				},
+			},
+			SuccessWhen: []*GRPCSuccess{
+				{
+					Body: []*SuccessOption{
+						{Contains: "hello"},
+					},
+				},
+			},
+		}
+		task.initTask()
+		err := task.check()
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "proto files not provided")
+	})
+
+	t.Run("missing full method", func(t *T.T) {
+		task := &GRPCTask{
+			Server: "localhost:50051",
+			SuccessWhen: []*GRPCSuccess{
+				{
+					Body: []*SuccessOption{
+						{Contains: "hello"},
+					},
+				},
+			},
+		}
+		task.initTask()
+		err := task.check()
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "full method is required")
+	})
+
+	t.Run("missing check rule", func(t *T.T) {
+		task := &GRPCTask{
+			Server: "localhost:50051",
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					ProtoFiles: &GRPCProtoFilesDiscovery{
+						FullMethod: "greeter.Greeter/SayHello",
+						ProtoFiles: map[string]string{
+							"greeter.proto": "syntax = \"proto3\";",
+						},
+					},
+				},
+			},
+		}
+		task.initTask()
+		err := task.check()
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "no check rule")
+	})
+}
+
+func TestGRPCTask_Init(t *T.T) {
+	serverAddr := "localhost:50052"
+	t.Run("init with default timeout", func(t *T.T) {
+		task := &GRPCTask{
+			Server: serverAddr,
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					Reflection: &GRPCReflectionDiscovery{
+						FullMethod: "greeter.Greeter/SayHello",
+					},
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.NoError(t, err)
+		assert.Equal(t, DefaultGRPCTimeout, task.timeout)
+	})
+
+	t.Run("init with custom timeout", func(t *T.T) {
+		task := &GRPCTask{
+			Server: serverAddr,
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					RequestTimeout: "10s",
+					Reflection: &GRPCReflectionDiscovery{
+						FullMethod: "greeter.Greeter/SayHello",
+					},
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.NoError(t, err)
+		assert.Equal(t, 10*time.Second, task.timeout)
+	})
+
+	t.Run("init with invalid timeout", func(t *T.T) {
+		task := &GRPCTask{
+			Server: serverAddr,
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					RequestTimeout: "invalid",
+					Reflection: &GRPCReflectionDiscovery{
+						FullMethod: "greeter.Greeter/SayHello",
+					},
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "invalid timeout")
+	})
+
+	t.Run("init with response time in success checker", func(t *T.T) {
+		task := &GRPCTask{
+			Server: 
serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + ResponseTime: "1s", + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + assert.Equal(t, 1*time.Second, task.SuccessWhen[0].respTime) + }) + + t.Run("init with invalid response time", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + ResponseTime: "invalid", + }, + }, + } + task.initTask() + + err := task.init() + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid response time") + }) + + t.Run("init with body regex in success checker", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "hello"}, + }, + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + }) + + t.Run("init with invalid regex in body", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {MatchRegex: "[invalid regex"}, + }, + }, + }, + } + task.initTask() + + err := task.init() + assert.Error(t, err) + assert.Contains(t, err.Error(), "compile regex failed") + }) + + t.Run("init with TLS certificate - ignore server cert error", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + Certificate: &GRPCOptCertificate{ + IgnoreServerCertificateError: true, + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + assert.NotNil(t, task.creds) + }) +} + +//nolint:golint // Requires real gRPC server access +func TestGRPCTask_GetResults(t *T.T) { + t.Skip("Skipping test that requires real gRPC server") + serverAddr := "localhost:50051" + greeterProto, err := os.ReadFile("grpcproto/greeter.proto") + if err != nil { + t.Fatalf("Failed to read greeter.proto: %v", err) + } + commonProto, err := os.ReadFile("grpcproto/common.proto") + if err != nil { + t.Fatalf("Failed to read common.proto: %v", err) + } + + t.Run("success result", func(t *T.T) { + task := &GRPCTask{ + Task: &Task{ + Name: "test-task", + Tags: map[string]string{ + "env": "test", + }, + }, + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: `body = load_json(response["body"]) +if body != nil && body["msg"] != nil { + result["is_failed"] = false + vars["msg"] = body["msg"] +} else { + result["is_failed"] = true + 
result["error_message"] = "响应中缺少 msg 字段" +}`, + } + task.initTask() + + err := task.check() + assert.NoError(t, err) + + err = task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + tags, fields := task.getResults() + t.Logf("tags: %v, fields: %v", tags, fields) + + // Check tags + assert.Equal(t, "test-task", tags["name"]) + assert.Equal(t, serverAddr, tags["server"]) + assert.Equal(t, "greeter.Greeter/SayHello", tags["method"]) + assert.Equal(t, "OK", tags["status"]) + assert.Equal(t, "grpc", tags["proto"]) + assert.Equal(t, "test", tags["env"]) + + // Check fields + assert.Equal(t, int64(1), fields["success"]) + assert.Greater(t, fields["response_time"], int64(0)) + assert.NotNil(t, fields["message"]) + }) + + t.Run("failure result", func(t *T.T) { + task := &GRPCTask{ + Task: &Task{ + Name: "test-task-fail", + }, + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `invalid json`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: "", + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + tags, fields := task.getResults() + + // Check tags + assert.Equal(t, "FAIL", tags["status"]) + + // Check fields + assert.Equal(t, int64(-1), fields["success"]) + assert.NotNil(t, fields["fail_reason"]) + }) +} + +func TestGRPCTask_OtherMethods(t *T.T) { + t.Run("class", func(t *T.T) { + task := &GRPCTask{} + assert.Equal(t, ClassGRPC, task.class()) + }) + + t.Run("metricName", func(t *T.T) { + task := &GRPCTask{} + assert.Equal(t, "grpc_dial_testing", task.metricName()) + }) + + t.Run("initTask", func(t *T.T) { + task := &GRPCTask{} + task.initTask() + assert.NotNil(t, task.Task) + }) + + t.Run("clear", func(t *T.T) { + task := &GRPCTask{ + result: []byte("test"), + reqError: "error", + reqCost: 100 * time.Millisecond, + } + task.clear() + + assert.Nil(t, task.result) + assert.Empty(t, task.reqError) + assert.Equal(t, time.Duration(0), task.reqCost) + }) + + t.Run("checkResult", func(t *T.T) { + t.Run("success without conditions", func(t *T.T) { + task := &GRPCTask{ + result: []byte(`{"message":"hello test"}`), + } + reasons, flag := task.checkResult() + assert.Nil(t, reasons) + assert.True(t, flag) + }) + + t.Run("with error", func(t *T.T) { + task := &GRPCTask{ + reqError: "test error", + } + reasons, flag := task.checkResult() + assert.NotEmpty(t, reasons) + assert.False(t, flag) + assert.Equal(t, "test error", reasons[0]) + }) + + t.Run("no response", func(t *T.T) { + task := &GRPCTask{ + PostScript: "", + } + reasons, flag := task.checkResult() + assert.NotEmpty(t, reasons) + assert.False(t, flag) + assert.Contains(t, reasons[0], "no response") + }) + + t.Run("success with body check", func(t *T.T) { + task := &GRPCTask{ + result: []byte(`{"message":"hello test"}`), + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "hello"}, + }, + }, + }, + } + // Initialize regex patterns + for _, checker := range task.SuccessWhen { + for _, v := range checker.Body { + err := genReg(v) + assert.NoError(t, err) + } + } + reasons, flag := task.checkResult() + assert.Empty(t, reasons) + assert.True(t, flag) + }) + + t.Run("failure with body check", func(t *T.T) { + task := &GRPCTask{ + 
result: []byte(`{"message":"hello test"}`), + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "notfound"}, + }, + }, + }, + } + // Initialize regex patterns + for _, checker := range task.SuccessWhen { + for _, v := range checker.Body { + err := genReg(v) + assert.NoError(t, err) + } + } + reasons, _ := task.checkResult() + assert.NotEmpty(t, reasons) + }) + }) + + t.Run("getHostName", func(t *T.T) { + t.Run("with port", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50051", + } + hostnames, err := task.getHostName() + assert.NoError(t, err) + assert.Equal(t, []string{"localhost"}, hostnames) + }) + + t.Run("empty server", func(t *T.T) { + task := &GRPCTask{} + _, err := task.getHostName() + assert.Error(t, err) + }) + }) + + t.Run("getVariableValue", func(t *T.T) { + t.Run("without post script", func(t *T.T) { + task := &GRPCTask{} + _, err := task.getVariableValue(Variable{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "post_script is empty") + }) + + t.Run("without result", func(t *T.T) { + task := &GRPCTask{ + PostScript: "vars[\"test\"] = \"value\"", + } + _, err := task.getVariableValue(Variable{ + TaskVarName: "test", + }) + assert.Error(t, err) + assert.Contains(t, err.Error(), "response body is empty") + }) + }) + + t.Run("getRawTask", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + RequestTimeout: "30s", + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + } + task.initTask() + + taskJSON, _ := json.Marshal(task) + rawTask, err := task.getRawTask(string(taskJSON)) + assert.NoError(t, err) + + var parsed GRPCTask + err = json.Unmarshal([]byte(rawTask), &parsed) + assert.NoError(t, err) + assert.Equal(t, task.Server, parsed.Server) + assert.Equal(t, task.getFullMethod(), parsed.getFullMethod()) + assert.Equal(t, task.AdvanceOptions.RequestOptions.RequestTimeout, parsed.AdvanceOptions.RequestOptions.RequestTimeout) + }) +} + +func TestBuildExtendedProtoMap(t *T.T) { + t.Run("with all imports present", func(t *T.T) { + greeterProto := `syntax = "proto3"; +package greeter; +import "greeter/user.proto"; +service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} +}` + + userProto := `syntax = "proto3"; +package user; +message GetUserRequest { + int32 user_id = 1; +}` + + protoFiles := map[string]string{ + "greeter.proto": greeterProto, + "greeter/user.proto": userProto, + } + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) + + // Check original files are preserved + assert.Equal(t, greeterProto, extendedMap["greeter.proto"]) + assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) + assert.Equal(t, 2, len(extendedMap)) + }) + + t.Run("with missing import", func(t *T.T) { + greeterProto := `syntax = "proto3"; +package greeter; +import "greeter/user.proto"; +service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} +}` + + protoFiles := map[string]string{ + "greeter.proto": greeterProto, + // user.proto is missing + } + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing imports") + assert.Contains(t, err.Error(), "greeter/user.proto") + assert.Nil(t, extendedMap) + }) + + t.Run("with multiple imports all present", func(t *T.T) { + mainProto := `syntax = "proto3"; +package main; +import "greeter/user.proto"; +import "greeter/common.proto"; +service Main {} +` + + 
userProto := `syntax = "proto3"; +package user; +message User {} +` + + commonProto := `syntax = "proto3"; +package common; +message Common {} +` + + protoFiles := map[string]string{ + "main.proto": mainProto, + "greeter/user.proto": userProto, + "greeter/common.proto": commonProto, + } + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) + + // Check all files are preserved + assert.Equal(t, mainProto, extendedMap["main.proto"]) + assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) + assert.Equal(t, commonProto, extendedMap["greeter/common.proto"]) + assert.Equal(t, 3, len(extendedMap)) + }) + + t.Run("with multiple imports one missing", func(t *T.T) { + mainProto := `syntax = "proto3"; +package main; +import "greeter/user.proto"; +import "greeter/common.proto"; +service Main {} +` + + userProto := `syntax = "proto3"; +package user; +message User {} +` + + protoFiles := map[string]string{ + "main.proto": mainProto, + "greeter/user.proto": userProto, + // greeter/common.proto is missing + } + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing imports") + assert.Contains(t, err.Error(), "greeter/common.proto") + assert.Nil(t, extendedMap) + }) + + t.Run("with no imports", func(t *T.T) { + protoFiles := map[string]string{ + "simple.proto": `syntax = "proto3"; package simple;`, + } + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) + + // Should have 1 entry (original file) + assert.Equal(t, 1, len(extendedMap)) + assert.NotEmpty(t, extendedMap["simple.proto"]) + }) + + t.Run("with path in filename and no imports", func(t *T.T) { + protoFiles := map[string]string{ + "path/to/simple.proto": `syntax = "proto3"; package simple;`, + } + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) + + // Should have 1 entry (original path preserved) + assert.Equal(t, 1, len(extendedMap)) + assert.NotEmpty(t, extendedMap["path/to/simple.proto"]) + }) + + t.Run("with empty proto files", func(t *T.T) { + protoFiles := map[string]string{} + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) + assert.Equal(t, 0, len(extendedMap)) + }) +} + +//nolint:golint // Requires real gRPC server access +func TestGRPCTask_PostScript(t *T.T) { + t.Skip("Skipping test that requires real gRPC server") + serverAddr := "localhost:50051" + greeterProto, err := os.ReadFile("grpcproto/greeter.proto") + if err != nil { + t.Fatalf("Failed to read greeter.proto: %v", err) + } + commonProto, err := os.ReadFile("grpcproto/common.proto") + if err != nil { + t.Fatalf("Failed to read common.proto: %v", err) + } + + t.Run("post script success", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: ` +body = load_json(response["body"]) +vars["msg"] = body["msg"] +result["is_failed"] = false + `, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + if task.reqError != "" { + t.Fatalf("RPC call failed: %s", task.reqError) + } + assert.NotNil(t, task.postScriptResult, 
"postScriptResult should not be nil, reqError: %s", task.reqError) + if task.postScriptResult != nil { + assert.Equal(t, "你好, test! 这是来自 gRPC 的问候", task.postScriptResult.Vars["msg"]) + } + }) + + t.Run("post script failure", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: ` +result["is_failed"] = true +result["error_message"] = "custom error" + `, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) // script runs but marks as failed + assert.NotNil(t, task.postScriptResult) + assert.True(t, task.postScriptResult.Result.IsFailed) + + reasons, flag := task.checkResult() + assert.False(t, flag) + assert.NotEmpty(t, reasons) + }) +} + +//nolint:golint // Requires real gRPC server access +func TestGRPCTask_SuccessWhen(t *T.T) { + t.Skip("Skipping test that requires real gRPC server") + serverAddr := "localhost:50051" + greeterProto, err := os.ReadFile("grpcproto/greeter.proto") + if err != nil { + t.Fatalf("Failed to read greeter.proto: %v", err) + } + commonProto, err := os.ReadFile("grpcproto/common.proto") + if err != nil { + t.Fatalf("Failed to read common.proto: %v", err) + } + + t.Run("success with body contains", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "test"}, + }, + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + reasons, flag := task.checkResult() + assert.True(t, flag) + assert.Empty(t, reasons) + + tags, fields := task.getResults() + assert.Equal(t, "OK", tags["status"]) + assert.Equal(t, int64(1), fields["success"]) + }) + + t.Run("failure with body check", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "notfound"}, + }, + }, + }, + SuccessWhenLogic: "and", + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + reasons, _ := task.checkResult() + // With "and" logic, if condition fails, it should fail + assert.NotEmpty(t, reasons) + }) + + t.Run("success with response time", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + 
Metadata: map[string]string{
+						"api-key": "test-key-123",
+					},
+					ProtoFiles: &GRPCProtoFilesDiscovery{
+						FullMethod:  "greeter.Greeter/SayHello",
+						JSONRequest: `{"name":"test"}`,
+						ProtoFiles: map[string]string{
+							"greeter.proto":          string(greeterProto),
+							"grpcproto/common.proto": string(commonProto),
+						},
+					},
+				},
+			},
+			SuccessWhen: []*GRPCSuccess{
+				{
+					ResponseTime: "10s",
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.NoError(t, err)
+
+		err = task.run()
+		assert.NoError(t, err)
+
+		_, flag := task.checkResult()
+		assert.True(t, flag)
+	})
+
+	t.Run("failure with response time exceeded", func(t *T.T) {
+		task := &GRPCTask{
+			Server: serverAddr,
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					Metadata: map[string]string{
+						"api-key": "test-key-123",
+					},
+					ProtoFiles: &GRPCProtoFilesDiscovery{
+						FullMethod:  "greeter.Greeter/SayHello",
+						JSONRequest: `{"name":"test"}`,
+						ProtoFiles: map[string]string{
+							"greeter.proto":          string(greeterProto),
+							"grpcproto/common.proto": string(commonProto),
+						},
+					},
+				},
+			},
+			SuccessWhen: []*GRPCSuccess{
+				{
+					ResponseTime: "1ms", // very short timeout
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.NoError(t, err)
+
+		// Add a small delay to ensure response time exceeds threshold
+		time.Sleep(10 * time.Millisecond)
+
+		err = task.run()
+		assert.NoError(t, err)
+
+		reasons, flag := task.checkResult()
+		// Response time check may pass or fail depending on actual response time
+		_ = reasons
+		_ = flag
+	})
+}
+
+//nolint:golint // Requires real gRPC server access
+func TestGRPCTask_RequestDiscoveryModes(t *T.T) {
+	t.Skip("Skipping test that requires real gRPC server")
+	serverAddr := "localhost:50051"
+	// read the proto files
+	greeterProto, err := os.ReadFile("grpcproto/greeter.proto")
+	if err != nil {
+		t.Skipf("failed to read greeter.proto: %v", err)
+	}
+
+	commonProto, err := os.ReadFile("grpcproto/common.proto")
+	if err != nil {
+		t.Skipf("failed to read common.proto: %v", err)
+	}
+
+	// Mode 1: ProtoFiles discovery
+	t.Run("Request mode 1: ProtoFiles discovery", func(t *T.T) {
+		task := &GRPCTask{
+			Server: serverAddr,
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					ProtoFiles: &GRPCProtoFilesDiscovery{
+						FullMethod:  "greeter.Greeter/SayHello",
+						JSONRequest: `{"name":"test"}`,
+						ProtoFiles: map[string]string{
+							"greeter.proto":          string(greeterProto),
+							"grpcproto/common.proto": string(commonProto),
+						},
+					},
+					Metadata: map[string]string{
+						"api-key": "test-key-123",
+					},
+					RequestTimeout: "1s",
+				},
+				// fall back to skipping server certificate verification
+				Certificate: &GRPCOptCertificate{
+					IgnoreServerCertificateError: true,
+				},
+			},
+			SuccessWhenLogic: "and",
+			SuccessWhen: []*GRPCSuccess{
+				{
+					Body: []*SuccessOption{
+						{Contains: "test"},
+					},
+					ResponseTime: "1s",
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.NoError(t, err)
+
+		err = task.run()
+		assert.NoError(t, err)
+		assert.Empty(t, task.reqError)
+		assert.NotNil(t, task.result)
+
+		tags, fields := task.getResults()
+		assert.Equal(t, "greeter.Greeter/SayHello", tags["method"])
+		assert.Equal(t, "OK", tags["status"])
+		t.Logf("ProtoFiles mode - tags: %v, fields: %v", tags, fields)
+	})
+
+	// Mode 2: Reflection discovery
+	t.Run("Request mode 2: Reflection discovery", func(t *T.T) {
+		task := &GRPCTask{
+			Server: serverAddr,
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					Reflection: &GRPCReflectionDiscovery{
+						FullMethod:  "greeter.Greeter/SayHello",
+						JSONRequest: `{"name":"reflection-test"}`,
+					},
+					Metadata: map[string]string{
+						"api-key": "test-key-123",
+					},
+					RequestTimeout: "1s",
+				},
+				// fall back to skipping server certificate verification
+				Certificate: &GRPCOptCertificate{
+					IgnoreServerCertificateError: true,
+				},
+			},
+			SuccessWhen: []*GRPCSuccess{
+				{
+					Body: []*SuccessOption{
+						{Contains: "reflection-test"},
+					},
+					ResponseTime: "1s",
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.NoError(t, err)
+
+		err = task.run()
+		assert.NoError(t, err)
+		assert.Empty(t, task.reqError)
+		assert.NotNil(t, task.result)
+
+		tags, fields := task.getResults()
+		assert.Equal(t, "greeter.Greeter/SayHello", tags["method"])
+		assert.Equal(t, "OK", tags["status"])
+		t.Logf("Reflection mode - tags: %v, fields: %v", tags, fields)
+	})
+
+	// Mode 3: HealthCheck discovery
+	t.Run("Request mode 3: HealthCheck discovery", func(t *T.T) {
+		task := &GRPCTask{
+			Server: "localhost:50053",
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					HealthCheck: &GRPCHealthCheckDiscovery{
+						// Service: "greeter.Greeter",
+					},
+					Metadata: map[string]string{
+						"api-key": "test-key-123",
+					},
+					RequestTimeout: "1s",
+				},
+				// fall back to skipping server certificate verification
+				Certificate: &GRPCOptCertificate{
+					IgnoreServerCertificateError: true,
+				},
+			},
+			SuccessWhen: []*GRPCSuccess{
+				{
+					Body: []*SuccessOption{
+						{Contains: "SERVING"},
+					},
+				},
+			},
+		}
+		task.initTask()
+
+		err := task.init()
+		assert.NoError(t, err)
+
+		err = task.run()
+		assert.NoError(t, err)
+		assert.Empty(t, task.reqError)
+		assert.NotNil(t, task.result)
+		assert.Contains(t, string(task.result), "SERVING")
+
+		tags, fields := task.getResults()
+		assert.Equal(t, "grpc.health.v1.Health/Check", tags["method"])
+		assert.Equal(t, "OK", tags["status"])
+		t.Logf("HealthCheck mode - tags: %v, fields: %v", tags, fields)
+	})
+}
+
+func TestGRPCTask_RenderTemplate(t *T.T) {
+	t.Run("render template with all fields", func(t *T.T) {
+		ct := &GRPCTask{
+			Task:   &Task{},
+			Server: "{{server_host}}:{{server_port}}",
+			// PostScript is not rendered by template (static script content)
+			PostScript: "vars[\"message\"] = \"hello\"",
+			SuccessWhen: []*GRPCSuccess{
+				{
+					ResponseTime: "{{response_time}}",
+					Body: []*SuccessOption{
+						{Contains: "{{body_contains}}"},
+					},
+				},
+			},
+			AdvanceOptions: &GRPCAdvanceOption{
+				RequestOptions: &GRPCOptRequest{
+					RequestTimeout: "{{timeout}}",
+					Metadata: map[string]string{
+						"{{metadata_key}}": "{{metadata_value}}",
+					},
+					ProtoFiles: &GRPCProtoFilesDiscovery{
+						FullMethod:  "{{service}}.{{method}}/{{rpc}}",
+						JSONRequest: "{{json_request}}",
+					},
+				},
+			},
+		}
+
+		fm := template.FuncMap{
+			"server_host":    func() string { return "localhost" },
+			"server_port":    func() string { return "50051" },
+			"response_time":  func() string { return "100ms" },
+			"body_contains":  func() string { return "success" },
+			"timeout":        func() string { return "5s" },
+			"metadata_key":   func() string { return "api-key" },
+			"metadata_value": func() string { return "test-key-123" },
+			"service":        func() string { return "greeter" },
+			"method":         func() string { return "Greeter" },
+			"rpc":            func() string { return "SayHello" },
+			"json_request":   func() string { return `{"name":"test"}` },
+		}
+
+		task, err := NewTask("", ct)
+		assert.NoError(t, err)
+
+		ct, ok := task.(*GRPCTask)
+		assert.True(t, ok)
+		assert.NoError(t, ct.renderTemplate(fm))
+
+		// Verify server
+		assert.Equal(t, "localhost:50051", ct.Server)
+
+		// Verify post
script (not rendered, should remain unchanged) + assert.Equal(t, "vars[\"message\"] = \"hello\"", ct.PostScript) + + // Verify success when + assert.Equal(t, "100ms", ct.SuccessWhen[0].ResponseTime) + assert.Equal(t, "success", ct.SuccessWhen[0].Body[0].Contains) + + // Verify advance options + assert.Equal(t, "5s", ct.AdvanceOptions.RequestOptions.RequestTimeout) + assert.Equal(t, "test-key-123", ct.AdvanceOptions.RequestOptions.Metadata["api-key"]) + assert.Equal(t, "greeter.Greeter/SayHello", ct.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod) + assert.Equal(t, `{"name":"test"}`, ct.AdvanceOptions.RequestOptions.ProtoFiles.JSONRequest) + }) + + t.Run("render template with empty raw task", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "localhost:50051", + } + + fm := template.FuncMap{} + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + assert.NoError(t, ct.renderTemplate(fm)) + assert.Equal(t, "localhost:50051", ct.Server) + }) + + t.Run("render template with invalid template", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "{{invalid_func}}", + } + + fm := template.FuncMap{} + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + err = ct.renderTemplate(fm) + assert.Error(t, err) + assert.Contains(t, err.Error(), "render server failed") + }) + + t.Run("render template with reflection discovery", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "{{service}}.{{method}}/{{rpc}}", + JSONRequest: "{{json_request}}", + }, + }, + }, + } + + fm := template.FuncMap{ + "service": func() string { + return "greeter" + }, + "method": func() string { + return "Greeter" + }, + "rpc": func() string { + return "SayHello" + }, + "json_request": func() string { + return `{"name":"test"}` + }, + } + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + assert.NoError(t, ct.renderTemplate(fm)) + + assert.Equal(t, "greeter.Greeter/SayHello", ct.AdvanceOptions.RequestOptions.Reflection.FullMethod) + assert.Equal(t, `{"name":"test"}`, ct.AdvanceOptions.RequestOptions.Reflection.JSONRequest) + }) + + t.Run("render template with health check discovery", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + HealthCheck: &GRPCHealthCheckDiscovery{ + Service: "{{service_name}}", + }, + }, + }, + } + + fm := template.FuncMap{ + "service_name": func() string { + return "greeter.Greeter" + }, + } + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + assert.NoError(t, ct.renderTemplate(fm)) + + assert.Equal(t, "greeter.Greeter", ct.AdvanceOptions.RequestOptions.HealthCheck.Service) + }) +} diff --git a/dialtesting/grpcproto/common.proto b/dialtesting/grpcproto/common.proto new file mode 100644 index 00000000..22e8c87d --- /dev/null +++ b/dialtesting/grpcproto/common.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package common; + +option go_package = "datakittest/grpc/pb"; + +message result { + int32 code = 1; + string msg = 2; +} diff --git a/dialtesting/grpcproto/greeter.proto b/dialtesting/grpcproto/greeter.proto new file mode 100644 index 00000000..aafacab6 --- /dev/null +++ 
b/dialtesting/grpcproto/greeter.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package greeter; + +option go_package = "datakittest/grpc/pb"; + +import "grpcproto/common.proto"; + +service Greeter { + rpc SayHello (HelloRequest) returns (common.result) {} +} + +message HelloRequest { + string name = 1; +} diff --git a/dialtesting/task.go b/dialtesting/task.go index 9b59e7fd..1d6a1174 100644 --- a/dialtesting/task.go +++ b/dialtesting/task.go @@ -29,6 +29,7 @@ const ( ClassTCP = "TCP" ClassWebsocket = "WEBSOCKET" ClassICMP = "ICMP" + ClassGRPC = "GRPC" ClassDNS = "DNS" ClassHeadless = "BROWSER" ClassOther = "OTHER" @@ -239,6 +240,9 @@ func CreateTaskChild(taskType string) (TaskChild, error) { case "icmp", ClassICMP: ct = &ICMPTask{} + case "grpc", ClassGRPC: + ct = &GRPCTask{} + default: return nil, fmt.Errorf("unknown task type %s", taskType) } @@ -675,6 +679,13 @@ func (t *Task) GetPostScriptVars() Vars { return nil } + if ct, ok := t.child.(*GRPCTask); ok { + if ct.postScriptResult != nil { + return ct.postScriptResult.Vars + } + return nil + } + return nil } diff --git a/go.mod b/go.mod index c18b5e3e..e5fb5478 100644 --- a/go.mod +++ b/go.mod @@ -15,12 +15,13 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c github.com/influxdata/line-protocol/v2 v2.2.1 + github.com/jhump/protoreflect v1.15.1 github.com/klauspost/compress v1.16.7 github.com/pierrec/lz4/v4 v4.1.18 github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 - github.com/prometheus/prometheus v0.46.0 + github.com/prometheus/prometheus v0.39.1 github.com/robfig/cron/v3 v3.0.1 github.com/rs/xid v1.2.1 github.com/stretchr/testify v1.9.0 @@ -29,6 +30,7 @@ require ( golang.org/x/net v0.16.0 golang.org/x/sys v0.13.0 golang.org/x/time v0.3.0 + google.golang.org/grpc v1.51.0 google.golang.org/protobuf v1.31.0 gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -48,6 +50,7 @@ require ( github.com/araddon/dateparse v0.0.0-20201001162425-8aadafed4dc4 // indirect github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bufbuild/protocompile v0.4.0 // indirect github.com/bytedance/sonic v1.8.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect @@ -96,9 +99,11 @@ require ( golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.13.0 // indirect + golang.org/x/sync v0.4.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/uint128 v1.2.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect diff --git a/go.sum b/go.sum index 5b510977..853ef84b 100644 --- a/go.sum +++ b/go.sum @@ -141,6 +141,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= +github.com/bufbuild/protocompile 
v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA= github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= @@ -528,6 +530,8 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -725,8 +729,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw= -github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4= +github.com/prometheus/prometheus v0.39.1 h1:abZM6A+sKAv2eKTbRIaHq4amM/nT07MuxRm0+QTaTj0= +github.com/prometheus/prometheus v0.39.1/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -1029,6 +1033,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1309,6 +1314,8 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1337,6 +1344,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/github.com/bufbuild/protocompile/.gitignore b/vendor/github.com/bufbuild/protocompile/.gitignore new file mode 100644 index 00000000..65b3b16c --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/.gitignore @@ -0,0 +1,3 @@ +*.iml +.idea/ +/.tmp/ diff --git a/vendor/github.com/bufbuild/protocompile/.golangci.yml b/vendor/github.com/bufbuild/protocompile/.golangci.yml new file mode 100644 index 00000000..37a369a6 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/.golangci.yml @@ -0,0 +1,99 @@ +run: + skip-dirs-use-default: false + skip-files: + - ".*\\.y\\.go$" +linters-settings: + errcheck: + check-type-assertions: true + forbidigo: + forbid: + - '^fmt\.Print' + - '^log\.' + - '^print$' + - '^println$' + - '^panic$' + gci: + # Section configuration to compare against. + # Section names are case-insensitive and may contain parameters in (). + # The default order of sections is `standard > default > custom > blank > dot`, + # If `custom-order` is `true`, it follows the order of `sections` option. + # Default: ["standard", "default"] + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(github.com/bufbuild/protocompile) # Custom section: groups all imports with the specified Prefix. + godox: + # TODO, OPT, etc. comments are fine to commit. Use FIXME comments for + # temporary hacks, and use godox to prevent committing them. + keywords: [FIXME] + varnamelen: + ignore-decls: + - T any + - i int + - wg sync.WaitGroup +linters: + enable-all: true + disable: + # TODO: TCN-350 - initial exclusions for failing linters. + # Should enable all of these. 
+ - dupl + - errname + - errorlint + - exhaustive + - exhaustruct + - forbidigo + - forcetypeassert + - gochecknoglobals + - gochecknoinits + - goconst + - gocyclo + - goerr113 + - interfacebloat + - nestif + - nilerr + - nilnil + - nonamedreturns + - thelper + - varnamelen + # Other disabled linters + - cyclop # covered by gocyclo + - deadcode # deprecated by author + - exhaustivestruct # replaced by exhaustruct + - funlen # rely on code review to limit function length + - gocognit # dubious "cognitive overhead" quantification + - gofumpt # prefer standard gofmt + - golint # deprecated by Go team + - gomnd # some unnamed constants are okay + - ifshort # deprecated by author + - interfacer # deprecated by author + - ireturn # "accept interfaces, return structs" isn't ironclad + - lll # don't want hard limits for line length + - maintidx # covered by gocyclo + - maligned # readability trumps efficient struct packing + - nlreturn # generous whitespace violates house style + - nosnakecase # deprecated in https://github.com/golangci/golangci-lint/pull/3065 + - rowserrcheck # no SQL code in protocompile + - scopelint # deprecated by author + - sqlclosecheck # no SQL code in protocompile + - structcheck # deprecated by author + - testpackage # internal tests are fine + - varcheck # deprecated by author + - wastedassign # not supported with generics + - wrapcheck # don't _always_ need to wrap errors + - wsl # generous whitespace violates house style +issues: + exclude: + # Don't ban use of fmt.Errorf to create new errors, but the remaining + # checks from err113 are useful. + - "err113: do not define dynamic errors.*" + exclude-rules: + # Benchmarks can't be run in parallel + - path: benchmark_test\.go + linters: + - paralleltest + # dupword reports several errors in .proto test fixtures + # gosec reports a few minor issues in tests + - path: _test\.go + linters: + - dupword + - gosec diff --git a/vendor/github.com/bufbuild/protocompile/LICENSE b/vendor/github.com/bufbuild/protocompile/LICENSE new file mode 100644 index 00000000..04cf1e31 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2022 Buf Technologies, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/bufbuild/protocompile/Makefile b/vendor/github.com/bufbuild/protocompile/Makefile new file mode 100644 index 00000000..afca92cc --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/Makefile @@ -0,0 +1,159 @@ +# See https://tech.davis-hansson.com/p/make/ +SHELL := bash +.DELETE_ON_ERROR: +.SHELLFLAGS := -eu -o pipefail -c +.DEFAULT_GOAL := all +MAKEFLAGS += --warn-undefined-variables +MAKEFLAGS += --no-builtin-rules +MAKEFLAGS += --no-print-directory +BIN := $(abspath .tmp/bin) +COPYRIGHT_YEARS := 2020-2022 +LICENSE_IGNORE := -e /testdata/ +# Set to use a different compiler. For example, `GO=go1.18rc1 make test`. +GO ?= go +TOOLS_MOD_DIR := ./internal/tools +UNAME_OS := $(shell uname -s) +UNAME_ARCH := $(shell uname -m) + +# NB: this must be kept in sync with constant in internal/benchmarks. 
+PROTOC_VERSION ?= 22.0 +PROTOC_DIR := $(abspath ./internal/testdata/protoc/$(PROTOC_VERSION)) +PROTOC := $(PROTOC_DIR)/bin/protoc + +ifeq ($(UNAME_OS),Darwin) +PROTOC_OS := osx +ifeq ($(UNAME_ARCH),arm64) +PROTOC_ARCH := aarch_64 +else +PROTOC_ARCH := x86_64 +endif +endif +ifeq ($(UNAME_OS),Linux) +PROTOC_OS := linux +PROTOC_ARCH := $(UNAME_ARCH) +endif + +.PHONY: help +help: ## Describe useful make targets + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "%-30s %s\n", $$1, $$2}' + +.PHONY: all +all: ## Build, test, and lint (default) + $(MAKE) test + $(MAKE) lint + +.PHONY: clean +clean: ## Delete intermediate build artifacts + @# -X only removes untracked files, -d recurses into directories, -f actually removes files/dirs + git clean -Xdf + +.PHONY: test +test: build ## Run unit tests + $(GO) test -vet=off -race -cover ./... + +.PHONY: benchmarks +benchmarks: build ## Run benchmarks + cd internal/benchmarks && $(GO) test -bench=. -benchmem -v ./... + +.PHONY: build +build: generate ## Build all packages + $(GO) build ./... + +.PHONY: install +install: ## Install all binaries + $(GO) install ./... + +.PHONY: lint +lint: $(BIN)/golangci-lint ## Lint Go + $(GO) vet ./... ./internal/benchmarks/... + $(BIN)/golangci-lint run + cd internal/benchmarks && $(BIN)/golangci-lint run + +.PHONY: lintfix +lintfix: $(BIN)/golangci-lint ## Automatically fix some lint errors + $(BIN)/golangci-lint run --fix + cd internal/benchmarks && $(BIN)/golangci-lint run --fix + +.PHONY: generate +generate: $(BIN)/license-header $(BIN)/goyacc test-descriptors ## Regenerate code and licenses + PATH="$(BIN):$(PATH)" $(GO) generate ./... + @# We want to operate on a list of modified and new files, excluding + @# deleted and ignored files. git-ls-files can't do this alone. comm -23 takes + @# two files and prints the union, dropping lines common to both (-3) and + @# those only in the second file (-2). We make one git-ls-files call for + @# the modified, cached, and new (--others) files, and a second for the + @# deleted files. + comm -23 \ + <(git ls-files --cached --modified --others --no-empty-directory --exclude-standard | sort -u | grep -v $(LICENSE_IGNORE) ) \ + <(git ls-files --deleted | sort -u) | \ + xargs $(BIN)/license-header \ + --license-type apache \ + --copyright-holder "Buf Technologies, Inc." \ + --year-range "$(COPYRIGHT_YEARS)" + +.PHONY: upgrade +upgrade: ## Upgrade dependencies + go get -u -t ./... && go mod tidy -v + +.PHONY: checkgenerate +checkgenerate: + @# Used in CI to verify that `make generate` doesn't produce a diff. 
+ test -z "$$(git status --porcelain | tee /dev/stderr)" + +$(BIN)/license-header: internal/tools/go.mod internal/tools/go.sum + @mkdir -p $(@D) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ github.com/bufbuild/buf/private/pkg/licenseheader/cmd/license-header + +$(BIN)/golangci-lint: internal/tools/go.mod internal/tools/go.sum + @mkdir -p $(@D) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ github.com/golangci/golangci-lint/cmd/golangci-lint + +$(BIN)/goyacc: internal/tools/go.mod internal/tools/go.sum + @mkdir -p $(@D) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ golang.org/x/tools/cmd/goyacc + +internal/testdata/protoc/cache/protoc-$(PROTOC_VERSION).zip: + @mkdir -p $(@D) + curl -o $@ -fsSL https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-$(PROTOC_ARCH).zip + +$(PROTOC): internal/testdata/protoc/cache/protoc-$(PROTOC_VERSION).zip + @mkdir -p $(@D) + unzip -o -q $< -d $(PROTOC_DIR) && \ + touch $@ + +internal/testdata/all.protoset: $(PROTOC) $(sort $(wildcard internal/testdata/*.proto)) + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/desc_test_complex.protoset: $(PROTOC) internal/testdata/desc_test_complex.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/desc_test_defaults.protoset: $(PROTOC) internal/testdata/desc_test_defaults.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/desc_test_proto3_optional.protoset: $(PROTOC) internal/testdata/desc_test_proto3_optional.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/descriptor_impl_tests.protoset: $(PROTOC) internal/testdata/desc_test2.proto internal/testdata/desc_test_defaults.proto internal/testdata/desc_test_proto3.proto internal/testdata/desc_test_proto3_optional.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/source_info.protoset: $(PROTOC) internal/testdata/desc_test_options.proto internal/testdata/desc_test_comments.proto internal/testdata/desc_test_complex.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_source_info -I. $(filter-out protoc,$(^F)) + +internal/testdata/options/test.protoset: $(PROTOC) internal/testdata/options/test.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) + +internal/testdata/options/test_proto3.protoset: $(PROTOC) internal/testdata/options/test_proto3.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. 
$(filter-out protoc,$(^F)) + +.PHONY: test-descriptors +test-descriptors: internal/testdata/all.protoset +test-descriptors: internal/testdata/desc_test_complex.protoset +test-descriptors: internal/testdata/desc_test_defaults.protoset +test-descriptors: internal/testdata/desc_test_proto3_optional.protoset +test-descriptors: internal/testdata/descriptor_impl_tests.protoset +test-descriptors: internal/testdata/source_info.protoset +test-descriptors: internal/testdata/options/test.protoset +test-descriptors: internal/testdata/options/test_proto3.protoset diff --git a/vendor/github.com/bufbuild/protocompile/README.md b/vendor/github.com/bufbuild/protocompile/README.md new file mode 100644 index 00000000..9d873330 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/README.md @@ -0,0 +1,91 @@ +![The Buf logo](./.github/buf-logo.svg) + +# Protocompile + +[![Build](https://github.com/bufbuild/protocompile/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/bufbuild/protocompile/actions/workflows/ci.yaml) +[![Report Card](https://goreportcard.com/badge/github.com/bufbuild/protocompile)](https://goreportcard.com/report/github.com/bufbuild/protocompile) +[![GoDoc](https://pkg.go.dev/badge/github.com/bufbuild/protocompile.svg)](https://pkg.go.dev/github.com/bufbuild/protocompile) + +This repo contains a parsing/linking engine for Protocol Buffers, written in pure Go. It is suitable as an alternative +to `protoc` (Google's official reference compiler for Protocol Buffers). This is the compiler that powers [Buf](https://buf.build) +and its bevy of tools. + +This repo is also the spiritual successor to the [`github.com/jhump/protoreflect/desc/protoparse`](https://godoc.org/github.com/jhump/protoreflect/desc/protoparse) +package. If you are looking for a newer version of `protoparse` that natively works with the newer Protobuf runtime +API for Go (`google.golang.org/protobuf`), you have found it! + +## Protocol Buffers + +If you've come across this repo but don't know what Protocol Buffers are, you might acquaint yourself with the [official +documentation](https://developers.google.com/protocol-buffers). Protocol Buffers, or Protobuf for short, is an IDL for +describing APIs and data structures and also a binary encoding format for efficiently transmitting and storing that +data. + +If you want to know more about the language itself, which is what this repo implements, take a look at Buf's +[Protobuf Guide](https://protobuf.com), which includes a very detailed language specification. + +### Descriptors + +Descriptors are the "lingua franca" for describing Protobuf data schemas. They are the basis of runtime features like +reflection and dynamic messages. They are also the output of a Protobuf compiler: a compiler can produce them and write +them to a file (whose contents are the binary-encoded form of a [`FileDescriptorSet`](https://github.com/protocolbuffers/protobuf/blob/v21.7/src/google/protobuf/descriptor.proto#L55-L59)) +or send them to a [plugin](https://docs.buf.build/reference/images#plugins) to generate code for a particular +programming language. + +Descriptors are similar to nodes in a syntax tree: the contents of a file descriptor correspond closely to the elements +in the source file from which it was generated. Also, the descriptor model's data structures are themselves defined in +[Protobuf](https://github.com/protocolbuffers/protobuf/blob/v21.7/src/google/protobuf/descriptor.proto). 
+ +## Using This Repo + +The primary API of this repo is in this root package: `github.com/bufbuild/protocompile`. This is the suggested entry +point and provides a type named `Compiler`, for compiling Protobuf source files into descriptors. There are also +numerous sub-packages, most of which implement various stages of the compiler. Here's an overview (_not_ in alphabetical +order): + + * [`protocompile`](https://pkg.go.dev/github.com/bufbuild/protocompile): + This is the entry point, used to configure and initiate a compilation operation. + * [`parser`](https://pkg.go.dev/github.com/bufbuild/protocompile/parser): + This is the first stage of the compiler. It parses Protobuf source code and produces an AST. This package can also + generate a file descriptor proto from an AST. + * [`ast`](https://pkg.go.dev/github.com/bufbuild/protocompile/ast): + This package models an Abstract Syntax Tree (AST) for the Protobuf language. + * [`linker`](https://pkg.go.dev/github.com/bufbuild/protocompile/linker): + This is the second stage of the compiler. The descriptor proto (generated from an AST) is linked, producing a more + useful data structure than simple descriptor protos. This step also performs numerous validations on the source, + like making sure that all type references are correct and that sources don't try to define two elements with the same + name. + * [`options`](https://pkg.go.dev/github.com/bufbuild/protocompile/options): + This is the next stage of the compiler: interpreting options. The linked data structures that come from the previous + stage are used to validate and interpret all options. + * [`sourceinfo`](https://pkg.go.dev/github.com/bufbuild/protocompile/sourceinfo): + This is the last stage of the compiler: generating source code info. Source code info contains metadata that maps + elements in the descriptor to the location in the original source file from which it came. This includes access to + comments. In order to provide correct source info for options, it must happen last, after options have been + interpreted. + * [`reporter`](https://pkg.go.dev/github.com/bufbuild/protocompile/reporter): This package provides error types + generated by the compiler and interfaces used by the compiler to report errors and warnings to the calling code. + * [`walk`](https://pkg.go.dev/github.com/bufbuild/protocompile/walk): + This package provides functions for walking through all of the elements in a descriptor (or descriptor proto) + hierarchy. + * [`protoutil`](https://pkg.go.dev/github.com/bufbuild/protocompile/protoutil): + This package contains some other useful functions for interacting with Protobuf descriptors. + +### Migrating from `protoparse` + +There are a few differences between this repo and its predecessor, `github.com/jhump/protoreflect/desc/protoparse`. + +* If you want to include "standard imports", for the well-known files that are included with `protoc`, you have to do + so explicitly. To do this, wrap your resolver using `protocompile.WithStandardImports`. +* If you used `protoparse.FileContentsFromMap`, in this new repo you'll use a `protocompile.SourceResolver` and then use + `protocompile.SourceAccessorFromMap` as its accessor function. +* If you used `Parser.ParseToAST`, you won't use the `protocompile` package but instead directly use `parser.Parse` in + this repo's `parser` sub-package. This returns an AST for the given file contents. 
+* If you used `Parser.ParseFilesButDoNotLink`, that is still possible in this repo, but not provided directly via a + single function. Instead, you need to take a few steps: + 1. Parse the source using `parser.Parse`. Then use `parser.ResultFromAST` to construct a result that contains a file + descriptor proto. + 2. Interpret whatever options can be interpreted without linking using `options.InterpretUnlinkedOptions`. This may + leave some options in the descriptor proto uninterpreted (including all custom options). + 3. If you want source code info for the file, finally call `sourceinfo.GenerateSourceInfo` using the index returned + from the previous step and store that in the file descriptor proto. diff --git a/vendor/github.com/bufbuild/protocompile/ast/doc.go b/vendor/github.com/bufbuild/protocompile/ast/doc.go new file mode 100644 index 00000000..fac65c42 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/doc.go @@ -0,0 +1,75 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ast defines types for modeling the AST (Abstract Syntax +// Tree) for the Protocol Buffers interface definition language. +// +// # Nodes +// +// All nodes of the tree implement the [Node] interface. Leaf nodes in the +// tree implement [TerminalNode], and all others implement [CompositeNode]. +// The root of the tree for a proto source file is a *[FileNode]. +// +// A [TerminalNode] represents a single lexical element, or [Token]. A +// [CompositeNode] represents a sub-tree of the AST and range of tokens. +// +// Position information is tracked using a *[FileInfo]. The lexer invokes its +// various Add* methods to add details as the file is tokenized. Storing +// the position information in the *[FileInfo], instead of in each AST node, +// allows the AST to have a much more compact representation. To extract +// detailed position information, you must use the NodeInfo method, available +// on either the *[FileInfo] which produced the node's items or the *[FileNode] +// root of the tree that contains the node. +// +// # Items, Tokens, and Comments +// +// An [Item] represents a lexical item, excluding whitespace. This can be +// either a [Token] or a [Comment]. +// +// Comments are not represented as nodes in the tree. Instead, they are +// attributed to terminal nodes in the tree. So, when lexing, comments +// are accumulated until the next non-comment token is found. The AST +// model in this package thus provides access to all comments in the +// file, regardless of location (unlike the SourceCodeInfo present in +// descriptor protos, which is lossy). The comments associated with a +// non-leaf/non-token node (i.e. a CompositeNode) come from the first +// and last nodes in its sub-tree, for leading and trailing comments +// respectively. +// +// A [Comment] value corresponds to a line ("//") or block ("/*") style +// comment in the source. 
These have no bearing on the grammar and are +// effectively ignored as the parser is determining the shape of the +// syntax tree. +// +// A [Token] value corresponds to a component of the grammar, that is +// used to produce an AST. They correspond to leaves in the AST (i.e. +// [TerminalNode]). +// +// The *[FileInfo] and *[FileNode] types provide methods for querying +// and iterating through all the items or tokens in the file. They also +// include a method for resolving an [Item] into a [Token] or [Comment]. +// +// # Factory Functions +// +// Creation of AST nodes should use the factory functions in this +// package instead of struct literals. Some factory functions accept +// optional arguments, which means the arguments can be nil. If nil +// values are provided for other (non-optional) arguments, the resulting +// node may be invalid and cause panics later in the program. +// +// This package defines numerous interfaces. However, user code should +// not attempt to implement any of them. Most consumers of an AST will +// not work correctly if they encounter concrete implementations other +// than the ones defined in this package. +package ast diff --git a/vendor/github.com/bufbuild/protocompile/ast/enum.go b/vendor/github.com/bufbuild/protocompile/ast/enum.go new file mode 100644 index 00000000..93edbcbd --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/enum.go @@ -0,0 +1,165 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// EnumNode represents an enum declaration. Example: +// +// enum Foo { BAR = 0; BAZ = 1 } +type EnumNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []EnumElement + CloseBrace *RuneNode +} + +func (*EnumNode) fileElement() {} +func (*EnumNode) msgElement() {} + +// NewEnumNode creates a new *EnumNode. All arguments must be non-nil. While +// it is technically allowed for decls to be nil or empty, the resulting node +// will not be a valid enum, which must have at least one value. +// - keyword: The token corresponding to the "enum" keyword. +// - name: The token corresponding to the enum's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the enum body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. 
+func NewEnumNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []EnumElement, closeBrace *RuneNode) *EnumNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + switch decl.(type) { + case *OptionNode, *EnumValueNode, *ReservedNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid EnumElement type: %T", decl)) + } + children = append(children, decl) + } + children = append(children, closeBrace) + + return &EnumNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + CloseBrace: closeBrace, + Decls: decls, + } +} + +// EnumElement is an interface implemented by all AST nodes that can +// appear in the body of an enum declaration. +type EnumElement interface { + Node + enumElement() +} + +var _ EnumElement = (*OptionNode)(nil) +var _ EnumElement = (*EnumValueNode)(nil) +var _ EnumElement = (*ReservedNode)(nil) +var _ EnumElement = (*EmptyDeclNode)(nil) + +// EnumValueDeclNode is a placeholder interface for AST nodes that represent +// enum values. This allows NoSourceNode to be used in place of *EnumValueNode +// for some usages. +type EnumValueDeclNode interface { + Node + GetName() Node + GetNumber() Node +} + +var _ EnumValueDeclNode = (*EnumValueNode)(nil) +var _ EnumValueDeclNode = NoSourceNode{} + +// EnumValueNode represents an enum value declaration. Example: +// +// UNSET = 0 [deprecated = true]; +type EnumValueNode struct { + compositeNode + Name *IdentNode + Equals *RuneNode + Number IntValueNode + Options *CompactOptionsNode + Semicolon *RuneNode +} + +func (*EnumValueNode) enumElement() {} + +// NewEnumValueNode creates a new *EnumValueNode. All arguments must be non-nil +// except opts which is only non-nil if the declaration included options. +// - name: The token corresponding to the enum value's name. +// - equals: The token corresponding to the '=' rune after the name. +// - number: The token corresponding to the enum value's number. +// - opts: Optional set of enum value options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
+func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, opts *CompactOptionsNode, semicolon *RuneNode) *EnumValueNode { + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if number == nil { + panic("number is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 4 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, name, equals, number) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + return &EnumValueNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Number: number, + Options: opts, + Semicolon: semicolon, + } +} + +func (e *EnumValueNode) GetName() Node { + return e.Name +} + +func (e *EnumValueNode) GetNumber() Node { + return e.Number +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/field.go b/vendor/github.com/bufbuild/protocompile/ast/field.go new file mode 100644 index 00000000..d4d3e392 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/field.go @@ -0,0 +1,672 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// FieldDeclNode is a node in the AST that defines a field. This includes +// normal message fields as well as extensions. There are multiple types +// of AST nodes that declare fields: +// - *FieldNode +// - *GroupNode +// - *MapFieldNode +// - *SyntheticMapField +// +// This also allows NoSourceNode and SyntheticMapField to be used in place of +// one of the above for some usages. +type FieldDeclNode interface { + Node + FieldLabel() Node + FieldName() Node + FieldType() Node + FieldTag() Node + FieldExtendee() Node + GetGroupKeyword() Node + GetOptions() *CompactOptionsNode +} + +var _ FieldDeclNode = (*FieldNode)(nil) +var _ FieldDeclNode = (*GroupNode)(nil) +var _ FieldDeclNode = (*MapFieldNode)(nil) +var _ FieldDeclNode = (*SyntheticMapField)(nil) +var _ FieldDeclNode = NoSourceNode{} + +// FieldNode represents a normal field declaration (not groups or maps). It +// can represent extension fields as well as non-extension fields (both inside +// of messages and inside of one-ofs). Example: +// +// optional string foo = 1; +type FieldNode struct { + compositeNode + Label FieldLabel + FldType IdentValueNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + Semicolon *RuneNode + + // This is an up-link to the containing *ExtendNode for fields + // that are defined inside of "extend" blocks. + Extendee *ExtendNode +} + +func (*FieldNode) msgElement() {} +func (*FieldNode) oneOfElement() {} +func (*FieldNode) extendElement() {} + +// NewFieldNode creates a new *FieldNode. The label and options arguments may be +// nil but the others must be non-nil. +// - label: The token corresponding to the label keyword if present ("optional", +// "required", or "repeated"). 
+// - fieldType: The token corresponding to the field's type. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *FieldNode { + if fieldType == nil { + panic("fieldType is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if tag == nil { + panic("tag is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 5 + if label != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if label != nil { + children = append(children, label) + } + children = append(children, fieldType, name, equals, tag) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + + return &FieldNode{ + compositeNode: compositeNode{ + children: children, + }, + Label: newFieldLabel(label), + FldType: fieldType, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + Semicolon: semicolon, + } +} + +func (n *FieldNode) FieldLabel() Node { + // proto3 fields and fields inside one-ofs will not have a label and we need + // this check in order to return a nil node -- otherwise we'd return a + // non-nil node that has a nil pointer value in it :/ + if n.Label.KeywordNode == nil { + return nil + } + return n.Label.KeywordNode +} + +func (n *FieldNode) FieldName() Node { + return n.Name +} + +func (n *FieldNode) FieldType() Node { + return n.FldType +} + +func (n *FieldNode) FieldTag() Node { + return n.Tag +} + +func (n *FieldNode) FieldExtendee() Node { + if n.Extendee != nil { + return n.Extendee.Extendee + } + return nil +} + +func (n *FieldNode) GetGroupKeyword() Node { + return nil +} + +func (n *FieldNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +// FieldLabel represents the label of a field, which indicates its cardinality +// (i.e. whether it is optional, required, or repeated). +type FieldLabel struct { + *KeywordNode + Repeated bool + Required bool +} + +func newFieldLabel(lbl *KeywordNode) FieldLabel { + repeated, required := false, false + if lbl != nil { + repeated = lbl.Val == "repeated" + required = lbl.Val == "required" + } + return FieldLabel{ + KeywordNode: lbl, + Repeated: repeated, + Required: required, + } +} + +// IsPresent returns true if a label keyword was present in the declaration +// and false if it was absent. +func (f *FieldLabel) IsPresent() bool { + return f.KeywordNode != nil +} + +// GroupNode represents a group declaration, which doubles as a field and inline +// message declaration. It can represent extension fields as well as +// non-extension fields (both inside of messages and inside of one-ofs). +// Example: +// +// optional group Key = 4 { +// optional uint64 id = 1; +// optional string name = 2; +// } +type GroupNode struct { + compositeNode + Label FieldLabel + Keyword *KeywordNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + MessageBody + + // This is an up-link to the containing *ExtendNode for groups + // that are defined inside of "extend" blocks. 
+	Extendee *ExtendNode
+}
+
+func (*GroupNode) msgElement()    {}
+func (*GroupNode) oneOfElement()  {}
+func (*GroupNode) extendElement() {}
+
+// NewGroupNode creates a new *GroupNode. The label and options arguments may be
+// nil but the others must be non-nil.
+// - label: The token corresponding to the label keyword if present ("optional",
+// "required", or "repeated").
+// - keyword: The token corresponding to the "group" keyword.
+// - name: The token corresponding to the field's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - tag: The token corresponding to the field's tag number.
+// - opts: Optional set of field options.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the group body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *GroupNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if equals == nil {
+		panic("equals is nil")
+	}
+	if tag == nil {
+		panic("tag is nil")
+	}
+	if openBrace == nil {
+		panic("openBrace is nil")
+	}
+	if closeBrace == nil {
+		panic("closeBrace is nil")
+	}
+	numChildren := 6 + len(decls)
+	if label != nil {
+		numChildren++
+	}
+	if opts != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	if label != nil {
+		children = append(children, label)
+	}
+	children = append(children, keyword, name, equals, tag)
+	if opts != nil {
+		children = append(children, opts)
+	}
+	children = append(children, openBrace)
+	for _, decl := range decls {
+		children = append(children, decl)
+	}
+	children = append(children, closeBrace)
+
+	ret := &GroupNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Label:   newFieldLabel(label),
+		Keyword: keyword,
+		Name:    name,
+		Equals:  equals,
+		Tag:     tag,
+		Options: opts,
+	}
+	populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace)
+	return ret
+}
+
+func (n *GroupNode) FieldLabel() Node {
+	if n.Label.KeywordNode == nil {
+		// return nil interface to indicate absence, not a typed nil
+		return nil
+	}
+	return n.Label.KeywordNode
+}
+
+func (n *GroupNode) FieldName() Node {
+	return n.Name
+}
+
+func (n *GroupNode) FieldType() Node {
+	return n.Keyword
+}
+
+func (n *GroupNode) FieldTag() Node {
+	return n.Tag
+}
+
+func (n *GroupNode) FieldExtendee() Node {
+	if n.Extendee != nil {
+		return n.Extendee.Extendee
+	}
+	return nil
+}
+
+func (n *GroupNode) GetGroupKeyword() Node {
+	return n.Keyword
+}
+
+func (n *GroupNode) GetOptions() *CompactOptionsNode {
+	return n.Options
+}
+
+func (n *GroupNode) MessageName() Node {
+	return n.Name
+}
+
+// OneOfDeclNode is a node in the AST that defines a oneof. There are
+// multiple types of AST nodes that declare oneofs:
+// - *OneOfNode
+// - *SyntheticOneOf
+//
+// This also allows NoSourceNode to be used in place of one of the above
+// for some usages.
+type OneOfDeclNode interface {
+	Node
+	OneOfName() Node
+}
+
+// OneOfNode represents a one-of declaration.
Example:
+//
+//	oneof query {
+//	  string by_name = 2;
+//	  Type by_type = 3;
+//	  Address by_address = 4;
+//	  Labels by_label = 5;
+//	}
+type OneOfNode struct {
+	compositeNode
+	Keyword    *KeywordNode
+	Name       *IdentNode
+	OpenBrace  *RuneNode
+	Decls      []OneOfElement
+	CloseBrace *RuneNode
+}
+
+func (*OneOfNode) msgElement() {}
+
+// NewOneOfNode creates a new *OneOfNode. All arguments must be non-nil. While
+// it is technically allowed for decls to be nil or empty, the resulting node
+// will not be a valid oneof, which must have at least one field.
+// - keyword: The token corresponding to the "oneof" keyword.
+// - name: The token corresponding to the oneof's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the oneof body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewOneOfNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneOfElement, closeBrace *RuneNode) *OneOfNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if openBrace == nil {
+		panic("openBrace is nil")
+	}
+	if closeBrace == nil {
+		panic("closeBrace is nil")
+	}
+	children := make([]Node, 0, 4+len(decls))
+	children = append(children, keyword, name, openBrace)
+	for _, decl := range decls {
+		children = append(children, decl)
+	}
+	children = append(children, closeBrace)
+
+	for _, decl := range decls {
+		switch decl := decl.(type) {
+		case *OptionNode, *FieldNode, *GroupNode, *EmptyDeclNode:
+		default:
+			panic(fmt.Sprintf("invalid OneOfElement type: %T", decl))
+		}
+	}
+
+	return &OneOfNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Keyword:    keyword,
+		Name:       name,
+		OpenBrace:  openBrace,
+		Decls:      decls,
+		CloseBrace: closeBrace,
+	}
+}
+
+func (n *OneOfNode) OneOfName() Node {
+	return n.Name
+}
+
+// OneOfElement is an interface implemented by all AST nodes that can
+// appear in the body of a oneof declaration.
+type OneOfElement interface {
+	Node
+	oneOfElement()
+}
+
+var _ OneOfElement = (*OptionNode)(nil)
+var _ OneOfElement = (*FieldNode)(nil)
+var _ OneOfElement = (*GroupNode)(nil)
+var _ OneOfElement = (*EmptyDeclNode)(nil)
+
+// SyntheticOneOf is not an actual node in the AST but a synthetic node
+// that represents the oneof implied by a proto3 optional field.
+type SyntheticOneOf struct {
+	Field *FieldNode
+}
+
+var _ Node = (*SyntheticOneOf)(nil)
+
+// NewSyntheticOneOf creates a new *SyntheticOneOf that corresponds to the
+// given proto3 optional field.
+func NewSyntheticOneOf(field *FieldNode) *SyntheticOneOf {
+	return &SyntheticOneOf{Field: field}
+}
+
+func (n *SyntheticOneOf) Start() Token {
+	return n.Field.Start()
+}
+
+func (n *SyntheticOneOf) End() Token {
+	return n.Field.End()
+}
+
+func (n *SyntheticOneOf) LeadingComments() []Comment {
+	return nil
+}
+
+func (n *SyntheticOneOf) TrailingComments() []Comment {
+	return nil
+}
+
+func (n *SyntheticOneOf) OneOfName() Node {
+	return n.Field.FieldName()
+}
+
+// MapTypeNode represents the type declaration for a map field. It defines
+// both the key and value types for the map. Example:
+//
+//	map<string, Values>
+type MapTypeNode struct {
+	compositeNode
+	Keyword    *KeywordNode
+	OpenAngle  *RuneNode
+	KeyType    *IdentNode
+	Comma      *RuneNode
+	ValueType  IdentValueNode
+	CloseAngle *RuneNode
+}
+
+// NewMapTypeNode creates a new *MapTypeNode. All arguments must be non-nil.
+// - keyword: The token corresponding to the "map" keyword.
+// - openAngle: The token corresponding to the "<" rune after the keyword.
+// - keyType: The token corresponding to the key type for the map.
+// - comma: The token corresponding to the "," rune between key and value types.
+// - valType: The token corresponding to the value type for the map.
+// - closeAngle: The token corresponding to the ">" rune that ends the declaration.
+func NewMapTypeNode(keyword *KeywordNode, openAngle *RuneNode, keyType *IdentNode, comma *RuneNode, valType IdentValueNode, closeAngle *RuneNode) *MapTypeNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if openAngle == nil {
+		panic("openAngle is nil")
+	}
+	if keyType == nil {
+		panic("keyType is nil")
+	}
+	if comma == nil {
+		panic("comma is nil")
+	}
+	if valType == nil {
+		panic("valType is nil")
+	}
+	if closeAngle == nil {
+		panic("closeAngle is nil")
+	}
+	children := []Node{keyword, openAngle, keyType, comma, valType, closeAngle}
+	return &MapTypeNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Keyword:    keyword,
+		OpenAngle:  openAngle,
+		KeyType:    keyType,
+		Comma:      comma,
+		ValueType:  valType,
+		CloseAngle: closeAngle,
+	}
+}
+
+// MapFieldNode represents a map field declaration. Example:
+//
+//	map<string, string> replacements = 3 [deprecated = true];
+type MapFieldNode struct {
+	compositeNode
+	MapType   *MapTypeNode
+	Name      *IdentNode
+	Equals    *RuneNode
+	Tag       *UintLiteralNode
+	Options   *CompactOptionsNode
+	Semicolon *RuneNode
+}
+
+func (*MapFieldNode) msgElement() {}
+
+// NewMapFieldNode creates a new *MapFieldNode. All arguments must be non-nil
+// except opts, which may be nil.
+// - mapType: The token corresponding to the map type.
+// - name: The token corresponding to the field's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - tag: The token corresponding to the field's tag number.
+// - opts: Optional set of field options.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
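+//
+// For example, a declaration like `map<string, string> replacements = 3;`
+// might be assembled as follows (a sketch; the argument nodes are assumed to
+// come from a lexer/parser):
+//
+//	fld := NewMapFieldNode(mapType, name, equals, tag, nil, semicolon)
+//	key, val := fld.KeyField(), fld.ValueField() // synthetic map-entry fields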
+func NewMapFieldNode(mapType *MapTypeNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *MapFieldNode { + if mapType == nil { + panic("mapType is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if tag == nil { + panic("tag is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 5 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, mapType, name, equals, tag) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + + return &MapFieldNode{ + compositeNode: compositeNode{ + children: children, + }, + MapType: mapType, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + Semicolon: semicolon, + } +} + +func (n *MapFieldNode) FieldLabel() Node { + return nil +} + +func (n *MapFieldNode) FieldName() Node { + return n.Name +} + +func (n *MapFieldNode) FieldType() Node { + return n.MapType +} + +func (n *MapFieldNode) FieldTag() Node { + return n.Tag +} + +func (n *MapFieldNode) FieldExtendee() Node { + return nil +} + +func (n *MapFieldNode) GetGroupKeyword() Node { + return nil +} + +func (n *MapFieldNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +func (n *MapFieldNode) MessageName() Node { + return n.Name +} + +func (n *MapFieldNode) KeyField() *SyntheticMapField { + return NewSyntheticMapField(n.MapType.KeyType, 1) +} + +func (n *MapFieldNode) ValueField() *SyntheticMapField { + return NewSyntheticMapField(n.MapType.ValueType, 2) +} + +// SyntheticMapField is not an actual node in the AST but a synthetic node +// that implements FieldDeclNode. These are used to represent the implicit +// field declarations of the "key" and "value" fields in a map entry. +type SyntheticMapField struct { + Ident IdentValueNode + Tag *UintLiteralNode +} + +// NewSyntheticMapField creates a new *SyntheticMapField for the given +// identifier (either a key or value type in a map declaration) and tag +// number (1 for key, 2 for value). +func NewSyntheticMapField(ident IdentValueNode, tagNum uint64) *SyntheticMapField { + tag := &UintLiteralNode{ + terminalNode: ident.Start().asTerminalNode(), + Val: tagNum, + } + return &SyntheticMapField{Ident: ident, Tag: tag} +} + +func (n *SyntheticMapField) Start() Token { + return n.Ident.Start() +} + +func (n *SyntheticMapField) End() Token { + return n.Ident.End() +} + +func (n *SyntheticMapField) LeadingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) TrailingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) FieldLabel() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldName() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldType() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldTag() Node { + return n.Tag +} + +func (n *SyntheticMapField) FieldExtendee() Node { + return nil +} + +func (n *SyntheticMapField) GetGroupKeyword() Node { + return nil +} + +func (n *SyntheticMapField) GetOptions() *CompactOptionsNode { + return nil +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/file.go b/vendor/github.com/bufbuild/protocompile/ast/file.go new file mode 100644 index 00000000..f12cca09 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/file.go @@ -0,0 +1,277 @@ +// Copyright 2020-2022 Buf Technologies, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// FileDeclNode is a placeholder interface for AST nodes that represent files. +// This allows NoSourceNode to be used in place of *FileNode for some usages. +type FileDeclNode interface { + Node + Name() string + GetSyntax() Node + NodeInfo(n Node) NodeInfo +} + +var _ FileDeclNode = (*FileNode)(nil) +var _ FileDeclNode = NoSourceNode{} + +// FileNode is the root of the AST hierarchy. It represents an entire +// protobuf source file. +type FileNode struct { + compositeNode + fileInfo *FileInfo + + Syntax *SyntaxNode // nil if file has no syntax declaration + Decls []FileElement + + // This synthetic node allows access to final comments and whitespace + EOF *RuneNode +} + +// NewFileNode creates a new *FileNode. The syntax parameter is optional. If it +// is absent, it means the file had no syntax declaration. +// +// This function panics if the concrete type of any element of decls is not +// from this package. +func NewFileNode(info *FileInfo, syntax *SyntaxNode, decls []FileElement, eof Token) *FileNode { + numChildren := len(decls) + 1 + if syntax != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if syntax != nil { + children = append(children, syntax) + } + for _, decl := range decls { + switch decl := decl.(type) { + case *PackageNode, *ImportNode, *OptionNode, *MessageNode, + *EnumNode, *ExtendNode, *ServiceNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid FileElement type: %T", decl)) + } + children = append(children, decl) + } + + eofNode := NewRuneNode(0, eof) + children = append(children, eofNode) + + return &FileNode{ + compositeNode: compositeNode{ + children: children, + }, + fileInfo: info, + Syntax: syntax, + Decls: decls, + EOF: eofNode, + } +} + +// NewEmptyFileNode returns an empty AST for a file with the given name. +func NewEmptyFileNode(filename string) *FileNode { + fileInfo := NewFileInfo(filename, []byte{}) + return NewFileNode(fileInfo, nil, nil, fileInfo.AddToken(0, 0)) +} + +func (f *FileNode) GetSyntax() Node { + return f.Syntax +} + +func (f *FileNode) Name() string { + return f.fileInfo.Name() +} + +func (f *FileNode) NodeInfo(n Node) NodeInfo { + return f.fileInfo.NodeInfo(n) +} + +func (f *FileNode) TokenInfo(t Token) NodeInfo { + return f.fileInfo.TokenInfo(t) +} + +func (f *FileNode) ItemInfo(i Item) ItemInfo { + return f.fileInfo.ItemInfo(i) +} + +func (f *FileNode) GetItem(i Item) (Token, Comment) { + return f.fileInfo.GetItem(i) +} + +func (f *FileNode) Items() Sequence[Item] { + return f.fileInfo.Items() +} + +func (f *FileNode) Tokens() Sequence[Token] { + return f.fileInfo.Tokens() +} + +// FileElement is an interface implemented by all AST nodes that are +// allowed as top-level declarations in the file. 
+type FileElement interface { + Node + fileElement() +} + +var _ FileElement = (*ImportNode)(nil) +var _ FileElement = (*PackageNode)(nil) +var _ FileElement = (*OptionNode)(nil) +var _ FileElement = (*MessageNode)(nil) +var _ FileElement = (*EnumNode)(nil) +var _ FileElement = (*ExtendNode)(nil) +var _ FileElement = (*ServiceNode)(nil) +var _ FileElement = (*EmptyDeclNode)(nil) + +// SyntaxNode represents a syntax declaration, which if present must be +// the first non-comment content. Example: +// +// syntax = "proto2"; +// +// Files that don't have a syntax node are assumed to use proto2 syntax. +type SyntaxNode struct { + compositeNode + Keyword *KeywordNode + Equals *RuneNode + Syntax StringValueNode + Semicolon *RuneNode +} + +// NewSyntaxNode creates a new *SyntaxNode. All four arguments must be non-nil: +// - keyword: The token corresponding to the "syntax" keyword. +// - equals: The token corresponding to the "=" rune. +// - syntax: The actual syntax value, e.g. "proto2" or "proto3". +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNode, semicolon *RuneNode) *SyntaxNode { + if keyword == nil { + panic("keyword is nil") + } + if equals == nil { + panic("equals is nil") + } + if syntax == nil { + panic("syntax is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, equals, syntax, semicolon} + return &SyntaxNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Equals: equals, + Syntax: syntax, + Semicolon: semicolon, + } +} + +// ImportNode represents an import statement. Example: +// +// import "google/protobuf/empty.proto"; +type ImportNode struct { + compositeNode + Keyword *KeywordNode + // Optional; if present indicates this is a public import + Public *KeywordNode + // Optional; if present indicates this is a weak import + Weak *KeywordNode + Name StringValueNode + Semicolon *RuneNode +} + +// NewImportNode creates a new *ImportNode. The public and weak arguments are optional +// and only one or the other (or neither) may be specified, not both. When public is +// non-nil, it indicates the "public" keyword in the import statement and means this is +// a public import. When weak is non-nil, it indicates the "weak" keyword in the import +// statement and means this is a weak import. When both are nil, this is a normal import. +// The other arguments must be non-nil: +// - keyword: The token corresponding to the "import" keyword. +// - public: The token corresponding to the optional "public" keyword. +// - weak: The token corresponding to the optional "weak" keyword. +// - name: The actual imported file name. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
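+//
+// For example, `import public "other.proto";` might be assembled as follows
+// (a sketch; the keyword, name, and semicolon nodes are assumed to come from
+// the lexer):
+//
+//	imp := NewImportNode(importKw, publicKw, nil, nameNode, semi)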
+func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, name StringValueNode, semicolon *RuneNode) *ImportNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 3 + if public != nil || weak != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + if public != nil { + children = append(children, public) + } else if weak != nil { + children = append(children, weak) + } + children = append(children, name, semicolon) + + return &ImportNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Public: public, + Weak: weak, + Name: name, + Semicolon: semicolon, + } +} + +func (*ImportNode) fileElement() {} + +// PackageNode represents a package declaration. Example: +// +// package foobar.com; +type PackageNode struct { + compositeNode + Keyword *KeywordNode + Name IdentValueNode + Semicolon *RuneNode +} + +func (*PackageNode) fileElement() {} + +// NewPackageNode creates a new *PackageNode. All three arguments must be non-nil: +// - keyword: The token corresponding to the "package" keyword. +// - name: The package name declared for the file. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewPackageNode(keyword *KeywordNode, name IdentValueNode, semicolon *RuneNode) *PackageNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, semicolon} + return &PackageNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/file_info.go b/vendor/github.com/bufbuild/protocompile/ast/file_info.go new file mode 100644 index 00000000..1de93457 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/file_info.go @@ -0,0 +1,676 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + "sort" + "unicode/utf8" +) + +// FileInfo contains information about the contents of a source file, including +// details about comments and items. A lexer accumulates these details as it +// scans the file contents. This allows efficient representation of things like +// source positions. +type FileInfo struct { + // The name of the source file. + name string + // The raw contents of the source file. + data []byte + // The offsets for each line in the file. The value is the zero-based byte + // offset for a given line. The line is given by its index. So the value at + // index 0 is the offset for the first line (which is always zero). The + // value at index 1 is the offset at which the second line begins. Etc. + lines []int + // The info for every comment in the file. 
This is empty if the file has no + // comments. The first entry corresponds to the first comment in the file, + // and so on. + comments []commentInfo + // The info for every lexed item in the file. The last item in the slice + // corresponds to the EOF, so every file (even an empty one) should have at + // least one entry. This includes all terminal symbols (tokens) in the AST + // as well as all comments. + items []itemSpan +} + +type commentInfo struct { + // the index of the item, in the file's items slice, that represents this + // comment + index int + // the index of the token to which this comment is attributed. + attributedToIndex int +} + +type itemSpan struct { + // the offset into the file of the first character of an item. + offset int + // the length of the item + length int +} + +// NewFileInfo creates a new instance for the given file. +func NewFileInfo(filename string, contents []byte) *FileInfo { + return &FileInfo{ + name: filename, + data: contents, + lines: []int{0}, + } +} + +func (f *FileInfo) Name() string { + return f.name +} + +// AddLine adds the offset representing the beginning of the "next" line in the file. +// The first line always starts at offset 0, the second line starts at offset-of-newline-char+1. +func (f *FileInfo) AddLine(offset int) { + if offset < 0 { + panic(fmt.Sprintf("invalid offset: %d must not be negative", offset)) + } + if offset > len(f.data) { + panic(fmt.Sprintf("invalid offset: %d is greater than file size %d", offset, len(f.data))) + } + + if len(f.lines) > 0 { + lastOffset := f.lines[len(f.lines)-1] + if offset <= lastOffset { + panic(fmt.Sprintf("invalid offset: %d is not greater than previously observed line offset %d", offset, lastOffset)) + } + } + + f.lines = append(f.lines, offset) +} + +// AddToken adds info about a token at the given location to this file. It +// returns a value that allows access to all of the token's details. +func (f *FileInfo) AddToken(offset, length int) Token { + if offset < 0 { + panic(fmt.Sprintf("invalid offset: %d must not be negative", offset)) + } + if length < 0 { + panic(fmt.Sprintf("invalid length: %d must not be negative", length)) + } + if offset+length > len(f.data) { + panic(fmt.Sprintf("invalid offset+length: %d is greater than file size %d", offset+length, len(f.data))) + } + + tokenID := len(f.items) + if len(f.items) > 0 { + lastToken := f.items[tokenID-1] + lastEnd := lastToken.offset + lastToken.length - 1 + if offset <= lastEnd { + panic(fmt.Sprintf("invalid offset: %d is not greater than previously observed token end %d", offset, lastEnd)) + } + } + + f.items = append(f.items, itemSpan{offset: offset, length: length}) + return Token(tokenID) +} + +// AddComment adds info about a comment to this file. Comments must first be +// added as items via f.AddToken(). The given comment argument is the Token +// from that step. The given attributedTo argument indicates another token in the +// file with which the comment is associated. If comment's offset is before that +// of attributedTo, then this is a leading comment. Otherwise, it is a trailing +// comment. 
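+//
+// For example, a lexer might record a leading comment like this (a sketch;
+// the offsets and lengths are assumed):
+//
+//	cmt := f.AddToken(0, 12)  // a "//"-style comment
+//	tok := f.AddToken(13, 7)  // the token that follows it
+//	f.AddComment(cmt, tok)    // cmt precedes tok, so it is a leading comment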
+func (f *FileInfo) AddComment(comment, attributedTo Token) Comment { + if len(f.comments) > 0 { + lastComment := f.comments[len(f.comments)-1] + if int(comment) <= lastComment.index { + panic(fmt.Sprintf("invalid index: %d is not greater than previously observed comment index %d", comment, lastComment.index)) + } + if int(attributedTo) < lastComment.attributedToIndex { + panic(fmt.Sprintf("invalid attribution: %d is not greater than previously observed comment attribution index %d", attributedTo, lastComment.attributedToIndex)) + } + } + + f.comments = append(f.comments, commentInfo{index: int(comment), attributedToIndex: int(attributedTo)}) + return Comment{ + fileInfo: f, + index: len(f.comments) - 1, + } +} + +// NodeInfo returns details from the original source for the given AST node. +// +// If the given n is out of range, this returns an invalid NodeInfo (i.e. +// nodeInfo.IsValid() returns false). If the given n is not out of range but +// also from a different file than f, then the result is undefined. +func (f *FileInfo) NodeInfo(n Node) NodeInfo { + return f.nodeInfo(int(n.Start()), int(n.End())) +} + +// TokenInfo returns details from the original source for the given token. +// +// If the given t is out of range, this returns an invalid NodeInfo (i.e. +// nodeInfo.IsValid() returns false). If the given t is not out of range but +// also from a different file than f, then the result is undefined. +func (f *FileInfo) TokenInfo(t Token) NodeInfo { + return f.nodeInfo(int(t), int(t)) +} + +func (f *FileInfo) nodeInfo(start, end int) NodeInfo { + if start < 0 || start >= len(f.items) { + return NodeInfo{} + } + if end < 0 || end >= len(f.items) { + return NodeInfo{} + } + return NodeInfo{fileInfo: f, startIndex: start, endIndex: end} +} + +// ItemInfo returns details from the original source for the given item. +// +// If the given i is out of range, this returns nil. If the given i is not +// out of range but also from a different file than f, then the result is +// undefined. +func (f *FileInfo) ItemInfo(i Item) ItemInfo { + tok, cmt := f.GetItem(i) + if tok != TokenError { + return f.TokenInfo(tok) + } + if cmt.IsValid() { + return cmt + } + return nil +} + +// GetItem returns the token or comment represented by the given item. Only one +// of the return values will be valid. If the item is a token then the returned +// comment will be a zero value and thus invalid (i.e. comment.IsValid() returns +// false). If the item is a comment then the returned token will be TokenError. +// +// If the given i is out of range, this returns (TokenError, Comment{}). If the +// given i is not out of range but also from a different file than f, then +// the result is undefined. +func (f *FileInfo) GetItem(i Item) (Token, Comment) { + if i < 0 || int(i) >= len(f.items) { + return TokenError, Comment{} + } + if !f.isComment(i) { + return Token(i), Comment{} + } + // It's a comment, so find its location in f.comments + c := sort.Search(len(f.comments), func(c int) bool { + return f.comments[c].index >= int(i) + }) + if c < len(f.comments) && f.comments[c].index == int(i) { + return TokenError, Comment{fileInfo: f, index: c} + } + // f.isComment(i) returned true, but we couldn't find it + // in f.comments? Uh oh... that shouldn't be possible. + return TokenError, Comment{} +} + +func (f *FileInfo) isDummyFile() bool { + return f == nil || f.lines == nil +} + +// Sequence represents a navigable sequence of elements. 
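+//
+// For example, walking every token in a file might look like this (a sketch;
+// fileInfo is assumed to be a previously populated *FileInfo):
+//
+//	seq := fileInfo.Tokens()
+//	for tok, ok := seq.First(); ok; tok, ok = seq.Next(tok) {
+//		info := fileInfo.TokenInfo(tok)
+//		_ = info.RawText()
+//	}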
+type Sequence[T any] interface {
+	// First returns the first element in the sequence. The bool return
+	// is false if this sequence contains no elements. For example, an
+	// empty file has no items or tokens.
+	First() (T, bool)
+	// Next returns the next element in the sequence that comes after
+	// the given element. The bool return is false if there is no next
+	// item (i.e. the given element is the last one). It also returns
+	// false if the given element is invalid.
+	Next(T) (T, bool)
+	// Last returns the last element in the sequence. The bool return
+	// is false if this sequence contains no elements. For example, an
+	// empty file has no items or tokens.
+	Last() (T, bool)
+	// Previous returns the previous element in the sequence that comes
+	// before the given element. The bool return is false if there is no
+	// previous item (i.e. the given element is the first one). It also
+	// returns false if the given element is invalid.
+	Previous(T) (T, bool)
+}
+
+func (f *FileInfo) Items() Sequence[Item] {
+	return items{fileInfo: f}
+}
+
+func (f *FileInfo) Tokens() Sequence[Token] {
+	return tokens{fileInfo: f}
+}
+
+type items struct {
+	fileInfo *FileInfo
+}
+
+func (i items) First() (Item, bool) {
+	if len(i.fileInfo.items) == 0 {
+		return 0, false
+	}
+	return 0, true
+}
+
+func (i items) Next(item Item) (Item, bool) {
+	if item < 0 || int(item) >= len(i.fileInfo.items)-1 {
+		return 0, false
+	}
+	return i.fileInfo.itemForward(item+1, true)
+}
+
+func (i items) Last() (Item, bool) {
+	if len(i.fileInfo.items) == 0 {
+		return 0, false
+	}
+	return Item(len(i.fileInfo.items) - 1), true
+}
+
+func (i items) Previous(item Item) (Item, bool) {
+	if item <= 0 || int(item) >= len(i.fileInfo.items) {
+		return 0, false
+	}
+	return i.fileInfo.itemBackward(item-1, true)
+}
+
+type tokens struct {
+	fileInfo *FileInfo
+}
+
+func (t tokens) First() (Token, bool) {
+	i, ok := t.fileInfo.itemForward(0, false)
+	return Token(i), ok
+}
+
+func (t tokens) Next(tok Token) (Token, bool) {
+	if tok < 0 || int(tok) >= len(t.fileInfo.items)-1 {
+		return 0, false
+	}
+	i, ok := t.fileInfo.itemForward(Item(tok+1), false)
+	return Token(i), ok
+}
+
+func (t tokens) Last() (Token, bool) {
+	i, ok := t.fileInfo.itemBackward(Item(len(t.fileInfo.items))-1, false)
+	return Token(i), ok
+}
+
+func (t tokens) Previous(tok Token) (Token, bool) {
+	if tok <= 0 || int(tok) >= len(t.fileInfo.items) {
+		return 0, false
+	}
+	i, ok := t.fileInfo.itemBackward(Item(tok-1), false)
+	return Token(i), ok
+}
+
+func (f *FileInfo) itemForward(i Item, allowComment bool) (Item, bool) {
+	end := Item(len(f.items))
+	for i < end {
+		if allowComment || !f.isComment(i) {
+			return i, true
+		}
+		i++
+	}
+	return 0, false
+}
+
+func (f *FileInfo) itemBackward(i Item, allowComment bool) (Item, bool) {
+	for i >= 0 {
+		if allowComment || !f.isComment(i) {
+			return i, true
+		}
+		i--
+	}
+	return 0, false
+}
+
+// isComment returns true if i refers to a comment.
+// (If it returns false, i refers to a token.)
+func (f *FileInfo) isComment(i Item) bool { + item := f.items[i] + if item.length < 2 { + return false + } + // see if item text starts with "//" or "/*" + if f.data[item.offset] != '/' { + return false + } + c := f.data[item.offset+1] + return c == '/' || c == '*' +} + +func (f *FileInfo) SourcePos(offset int) SourcePos { + lineNumber := sort.Search(len(f.lines), func(n int) bool { + return f.lines[n] > offset + }) + + // If it weren't for tabs and multi-byte unicode characters, we + // could trivially compute the column just based on offset and the + // starting offset of lineNumber :( + // Wish this were more efficient... that would require also storing + // computed line+column information, which would triple the size of + // f's items slice... + col := 0 + for i := f.lines[lineNumber-1]; i < offset; i++ { + if f.data[i] == '\t' { + nextTabStop := 8 - (col % 8) + col += nextTabStop + } else if utf8.RuneStart(f.data[i]) { + col++ + } + } + + return SourcePos{ + Filename: f.name, + Offset: offset, + Line: lineNumber, + // Columns are 1-indexed in this AST + Col: col + 1, + } +} + +// Token represents a single lexed token. +type Token int + +// TokenError indicates an invalid token. It is returned from query +// functions when no valid token satisfies the request. +const TokenError = Token(-1) + +// AsItem returns the Item that corresponds to t. +func (t Token) AsItem() Item { + return Item(t) +} + +func (t Token) asTerminalNode() terminalNode { + return terminalNode(t) +} + +// Item represents an item lexed from source. It represents either +// a Token or a Comment. +type Item int + +// ItemInfo provides details about an item's location in the source file and +// its contents. +type ItemInfo interface { + Start() SourcePos + End() SourcePos + LeadingWhitespace() string + RawText() string +} + +// NodeInfo represents the details for a node or token in the source file's AST. +// It provides access to information about the node's location in the source +// file. It also provides access to the original text in the source file (with +// all the original formatting intact) and also provides access to surrounding +// comments. +type NodeInfo struct { + fileInfo *FileInfo + startIndex, endIndex int +} + +var _ ItemInfo = NodeInfo{} + +// IsValid returns true if this node info is valid. If n is a zero-value struct, +// it is not valid. +func (n NodeInfo) IsValid() bool { + return n.fileInfo != nil +} + +// Start returns the starting position of the element. This is the first +// character of the node or token. +func (n NodeInfo) Start() SourcePos { + if n.fileInfo.isDummyFile() { + return UnknownPos(n.fileInfo.name) + } + + tok := n.fileInfo.items[n.startIndex] + return n.fileInfo.SourcePos(tok.offset) +} + +// End returns the ending position of the element, exclusive. This is the +// location after the last character of the node or token. If n returns +// the same position for Start() and End(), the element in source had a +// length of zero (which should only happen for the special EOF token +// that designates the end of the file). +func (n NodeInfo) End() SourcePos { + if n.fileInfo.isDummyFile() { + return UnknownPos(n.fileInfo.name) + } + + tok := n.fileInfo.items[n.endIndex] + // find offset of last character in the span + offset := tok.offset + if tok.length > 0 { + offset += tok.length - 1 + } + pos := n.fileInfo.SourcePos(offset) + if tok.length > 0 { + // We return "open range", so end is the position *after* the + // last character in the span. 
So we adjust the column to point one past it.
+		pos.Col++
+	}
+	return pos
+}
+
+// LeadingWhitespace returns any whitespace prior to the element. If there
+// were comments in between this element and the previous one, this will
+// return the whitespace between the last such comment and the element. If
+// there were no such comments, this returns the whitespace between the
+// previous element and the current one.
+func (n NodeInfo) LeadingWhitespace() string {
+	if n.fileInfo.isDummyFile() {
+		return ""
+	}
+
+	tok := n.fileInfo.items[n.startIndex]
+	var prevEnd int
+	if n.startIndex > 0 {
+		prevTok := n.fileInfo.items[n.startIndex-1]
+		prevEnd = prevTok.offset + prevTok.length
+	}
+	return string(n.fileInfo.data[prevEnd:tok.offset])
+}
+
+// LeadingComments returns all comments in the source that exist between the
+// element and the previous element, except for any trailing comment on the
+// previous element.
+func (n NodeInfo) LeadingComments() Comments {
+	if n.fileInfo.isDummyFile() {
+		return EmptyComments
+	}
+
+	start := sort.Search(len(n.fileInfo.comments), func(i int) bool {
+		return n.fileInfo.comments[i].attributedToIndex >= n.startIndex
+	})
+
+	if start == len(n.fileInfo.comments) || n.fileInfo.comments[start].attributedToIndex != n.startIndex {
+		// no comments associated with this token
+		return EmptyComments
+	}
+
+	numComments := 0
+	for i := start; i < len(n.fileInfo.comments); i++ {
+		comment := n.fileInfo.comments[i]
+		if comment.attributedToIndex == n.startIndex &&
+			comment.index < n.startIndex {
+			numComments++
+		} else {
+			break
+		}
+	}
+
+	return Comments{
+		fileInfo: n.fileInfo,
+		first:    start,
+		num:      numComments,
+	}
+}
+
+// TrailingComments returns the trailing comment for the element, if any.
+// An element will have a trailing comment only if it is the last token
+// on a line and is followed by a comment on the same line. Typically, the
+// following comment is a line-style comment (starting with "//").
+//
+// If the following comment is a block-style comment that spans multiple
+// lines, and the next token is on the same line as the end of the comment,
+// the comment is NOT considered a trailing comment.
+//
+// Examples:
+//
+//	foo // this is a trailing comment for foo
+//
+//	bar /* this is a trailing comment for bar */
+//
+//	baz /* this is a trailing
+//	comment for baz */
+//
+//	fizz /* this is NOT a trailing
+//	comment for fizz because
+//	it's on the same line as the
+//	following token buzz */ buzz
+func (n NodeInfo) TrailingComments() Comments {
+	if n.fileInfo.isDummyFile() {
+		return EmptyComments
+	}
+
+	start := sort.Search(len(n.fileInfo.comments), func(i int) bool {
+		comment := n.fileInfo.comments[i]
+		return comment.attributedToIndex >= n.endIndex &&
+			comment.index > n.endIndex
+	})
+
+	if start == len(n.fileInfo.comments) || n.fileInfo.comments[start].attributedToIndex != n.endIndex {
+		// no comments associated with this token
+		return EmptyComments
+	}
+
+	numComments := 0
+	for i := start; i < len(n.fileInfo.comments); i++ {
+		comment := n.fileInfo.comments[i]
+		if comment.attributedToIndex == n.endIndex {
+			numComments++
+		} else {
+			break
+		}
+	}
+
+	return Comments{
+		fileInfo: n.fileInfo,
+		first:    start,
+		num:      numComments,
+	}
+}
+
+// RawText returns the actual text in the source file that corresponds to the
+// element. If the element is a node in the AST that encompasses multiple
+// items (like an entire declaration), the full text of all items is returned
+// including any interior whitespace and comments.
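+//
+// For example (a sketch; fileNode and decl are assumed to come from a parse):
+//
+//	info := fileNode.NodeInfo(decl)
+//	fmt.Printf("%s: %q\n", info.Start(), info.RawText())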
+func (n NodeInfo) RawText() string { + startTok := n.fileInfo.items[n.startIndex] + endTok := n.fileInfo.items[n.endIndex] + return string(n.fileInfo.data[startTok.offset : endTok.offset+endTok.length]) +} + +// SourcePos identifies a location in a proto source file. +type SourcePos struct { + Filename string + // The line and column numbers for this position. These are + // one-based, so the first line and column is 1 (not zero). If + // either is zero, then the line and column are unknown and + // only the file name is known. + Line, Col int + // The offset, in bytes, from the beginning of the file. This + // is zero-based: the first character in the file is offset zero. + Offset int +} + +func (pos SourcePos) String() string { + if pos.Line <= 0 || pos.Col <= 0 { + return pos.Filename + } + return fmt.Sprintf("%s:%d:%d", pos.Filename, pos.Line, pos.Col) +} + +// Comments represents a range of sequential comments in a source file +// (e.g. no interleaving items or AST nodes). +type Comments struct { + fileInfo *FileInfo + first, num int +} + +// EmptyComments is an empty set of comments. +var EmptyComments = Comments{} + +// Len returns the number of comments in c. +func (c Comments) Len() int { + return c.num +} + +func (c Comments) Index(i int) Comment { + if i < 0 || i >= c.num { + panic(fmt.Sprintf("index %d out of range (len = %d)", i, c.num)) + } + return Comment{ + fileInfo: c.fileInfo, + index: c.first + i, + } +} + +// Comment represents a single comment in a source file. It indicates +// the position of the comment and its contents. A single comment means +// one line-style comment ("//" to end of line) or one block comment +// ("/*" through "*/"). If a longer comment uses multiple line comments, +// each line is considered to be a separate comment. For example: +// +// // This is a single comment, and +// // this is a separate comment. +type Comment struct { + fileInfo *FileInfo + index int +} + +var _ ItemInfo = Comment{} + +// IsValid returns true if this comment is valid. If this comment is +// a zero-value struct, it is not valid. +func (c Comment) IsValid() bool { + return c.fileInfo != nil && c.index >= 0 +} + +// AsItem returns the Item that corresponds to c. +func (c Comment) AsItem() Item { + return Item(c.fileInfo.comments[c.index].index) +} + +func (c Comment) Start() SourcePos { + span := c.fileInfo.items[c.AsItem()] + return c.fileInfo.SourcePos(span.offset) +} + +func (c Comment) End() SourcePos { + span := c.fileInfo.items[c.AsItem()] + return c.fileInfo.SourcePos(span.offset + span.length - 1) +} + +func (c Comment) LeadingWhitespace() string { + item := c.AsItem() + span := c.fileInfo.items[item] + var prevEnd int + if item > 0 { + prevItem := c.fileInfo.items[item-1] + prevEnd = prevItem.offset + prevItem.length + } + return string(c.fileInfo.data[prevEnd:span.offset]) +} + +func (c Comment) RawText() string { + span := c.fileInfo.items[c.AsItem()] + return string(c.fileInfo.data[span.offset : span.offset+span.length]) +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/identifiers.go b/vendor/github.com/bufbuild/protocompile/ast/identifiers.go new file mode 100644 index 00000000..27599929 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/identifiers.go @@ -0,0 +1,148 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + "strings" +) + +// Identifier is a possibly-qualified name. This is used to distinguish +// ValueNode values that are references/identifiers vs. those that are +// string literals. +type Identifier string + +// IdentValueNode is an AST node that represents an identifier. +type IdentValueNode interface { + ValueNode + AsIdentifier() Identifier +} + +var _ IdentValueNode = (*IdentNode)(nil) +var _ IdentValueNode = (*CompoundIdentNode)(nil) + +// IdentNode represents a simple, unqualified identifier. These are used to name +// elements declared in a protobuf file or to refer to elements. Example: +// +// foobar +type IdentNode struct { + terminalNode + Val string +} + +// NewIdentNode creates a new *IdentNode. The given val is the identifier text. +func NewIdentNode(val string, tok Token) *IdentNode { + return &IdentNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} + +func (n *IdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *IdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// ToKeyword is used to convert identifiers to keywords. Since keywords are not +// reserved in the protobuf language, they are initially lexed as identifiers +// and then converted to keywords based on context. +func (n *IdentNode) ToKeyword() *KeywordNode { + return (*KeywordNode)(n) +} + +// CompoundIdentNode represents a qualified identifier. A qualified identifier +// has at least one dot and possibly multiple identifier names (all separated by +// dots). If the identifier has a leading dot, then it is a *fully* qualified +// identifier. Example: +// +// .com.foobar.Baz +type CompoundIdentNode struct { + compositeNode + // Optional leading dot, indicating that the identifier is fully qualified. + LeadingDot *RuneNode + Components []*IdentNode + // Dots[0] is the dot after Components[0]. The length of Dots is always + // one less than the length of Components. + Dots []*RuneNode + // The text value of the identifier, with all components and dots + // concatenated. + Val string +} + +// NewCompoundIdentNode creates a *CompoundIdentNode. The leadingDot may be nil. +// The dots arg must have a length that is one less than the length of +// components. The components arg must not be empty. 
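+//
+// For example, ".foo.bar" could be built like so (a sketch; tok0 through tok3
+// are assumed to be tokens from the lexer):
+//
+//	leading := NewRuneNode('.', tok0)
+//	foo := NewIdentNode("foo", tok1)
+//	dot := NewRuneNode('.', tok2)
+//	bar := NewIdentNode("bar", tok3)
+//	ident := NewCompoundIdentNode(leading, []*IdentNode{foo, bar}, []*RuneNode{dot})
+//	// ident.AsIdentifier() == ".foo.bar"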
+func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots []*RuneNode) *CompoundIdentNode { + if len(components) == 0 { + panic("must have at least one component") + } + if len(dots) != len(components)-1 { + panic(fmt.Sprintf("%d components requires %d dots, not %d", len(components), len(components)-1, len(dots))) + } + numChildren := len(components)*2 - 1 + if leadingDot != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + var b strings.Builder + if leadingDot != nil { + children = append(children, leadingDot) + b.WriteRune(leadingDot.Rune) + } + for i, comp := range components { + if i > 0 { + dot := dots[i-1] + children = append(children, dot) + b.WriteRune(dot.Rune) + } + children = append(children, comp) + b.WriteString(comp.Val) + } + return &CompoundIdentNode{ + compositeNode: compositeNode{ + children: children, + }, + LeadingDot: leadingDot, + Components: components, + Dots: dots, + Val: b.String(), + } +} + +func (n *CompoundIdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *CompoundIdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// KeywordNode is an AST node that represents a keyword. Keywords are +// like identifiers, but they have special meaning in particular contexts. +// Example: +// +// message +type KeywordNode IdentNode + +// NewKeywordNode creates a new *KeywordNode. The given val is the keyword. +func NewKeywordNode(val string, tok Token) *KeywordNode { + return &KeywordNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/message.go b/vendor/github.com/bufbuild/protocompile/ast/message.go new file mode 100644 index 00000000..00dfe45f --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/message.go @@ -0,0 +1,213 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// MessageDeclNode is a node in the AST that defines a message type. This +// includes normal message fields as well as implicit messages: +// - *MessageNode +// - *GroupNode (the group is a field and inline message type) +// - *MapFieldNode (map fields implicitly define a MapEntry message type) +// +// This also allows NoSourceNode to be used in place of one of the above +// for some usages. +type MessageDeclNode interface { + Node + MessageName() Node +} + +var _ MessageDeclNode = (*MessageNode)(nil) +var _ MessageDeclNode = (*GroupNode)(nil) +var _ MessageDeclNode = (*MapFieldNode)(nil) +var _ MessageDeclNode = NoSourceNode{} + +// MessageNode represents a message declaration. Example: +// +// message Foo { +// string name = 1; +// repeated string labels = 2; +// bytes extra = 3; +// } +type MessageNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + MessageBody +} + +func (*MessageNode) fileElement() {} +func (*MessageNode) msgElement() {} + +// NewMessageNode creates a new *MessageNode. All arguments must be non-nil. 
+// - keyword: The token corresponding to the "message" keyword. +// - name: The token corresponding to the field's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the message body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewMessageNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *MessageNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &MessageNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + } + populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace) + return ret +} + +func (n *MessageNode) MessageName() Node { + return n.Name +} + +// MessageBody represents the body of a message. It is used by both +// MessageNodes and GroupNodes. +type MessageBody struct { + OpenBrace *RuneNode + Decls []MessageElement + CloseBrace *RuneNode +} + +func populateMessageBody(m *MessageBody, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) { + m.OpenBrace = openBrace + m.Decls = decls + for _, decl := range decls { + switch decl.(type) { + case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneOfNode, + *MessageNode, *EnumNode, *ExtendNode, *ExtensionRangeNode, + *ReservedNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid MessageElement type: %T", decl)) + } + } + m.CloseBrace = closeBrace +} + +// MessageElement is an interface implemented by all AST nodes that can +// appear in a message body. +type MessageElement interface { + Node + msgElement() +} + +var _ MessageElement = (*OptionNode)(nil) +var _ MessageElement = (*FieldNode)(nil) +var _ MessageElement = (*MapFieldNode)(nil) +var _ MessageElement = (*OneOfNode)(nil) +var _ MessageElement = (*GroupNode)(nil) +var _ MessageElement = (*MessageNode)(nil) +var _ MessageElement = (*EnumNode)(nil) +var _ MessageElement = (*ExtendNode)(nil) +var _ MessageElement = (*ExtensionRangeNode)(nil) +var _ MessageElement = (*ReservedNode)(nil) +var _ MessageElement = (*EmptyDeclNode)(nil) + +// ExtendNode represents a declaration of extension fields. Example: +// +// extend google.protobuf.FieldOptions { +// bool redacted = 33333; +// } +type ExtendNode struct { + compositeNode + Keyword *KeywordNode + Extendee IdentValueNode + OpenBrace *RuneNode + Decls []ExtendElement + CloseBrace *RuneNode +} + +func (*ExtendNode) fileElement() {} +func (*ExtendNode) msgElement() {} + +// NewExtendNode creates a new *ExtendNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "extend" keyword. +// - extendee: The token corresponding to the name of the extended message. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the message body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. 
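+//
+// Note that the returned node is also wired into its children: every *FieldNode
+// and *GroupNode in decls has its Extendee up-link set to the new node. For
+// example (a sketch; the argument nodes are assumed to come from a parse):
+//
+//	ext := NewExtendNode(kw, extendee, open, decls, close)
+//	// for a field declaration: decls[0].(*FieldNode).Extendee == ext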
+func NewExtendNode(keyword *KeywordNode, extendee IdentValueNode, openBrace *RuneNode, decls []ExtendElement, closeBrace *RuneNode) *ExtendNode { + if keyword == nil { + panic("keyword is nil") + } + if extendee == nil { + panic("extendee is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, extendee, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &ExtendNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Extendee: extendee, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } + for _, decl := range decls { + switch decl := decl.(type) { + case *FieldNode: + decl.Extendee = ret + case *GroupNode: + decl.Extendee = ret + case *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ExtendElement type: %T", decl)) + } + } + return ret +} + +// ExtendElement is an interface implemented by all AST nodes that can +// appear in the body of an extends declaration. +type ExtendElement interface { + Node + extendElement() +} + +var _ ExtendElement = (*FieldNode)(nil) +var _ ExtendElement = (*GroupNode)(nil) +var _ ExtendElement = (*EmptyDeclNode)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/ast/no_source.go b/vendor/github.com/bufbuild/protocompile/ast/no_source.go new file mode 100644 index 00000000..d66d9395 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/no_source.go @@ -0,0 +1,119 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// UnknownPos is a placeholder position when only the source file +// name is known. +func UnknownPos(filename string) SourcePos { + return SourcePos{Filename: filename} +} + +// NoSourceNode is a placeholder AST node that implements numerous +// interfaces in this package. It can be used to represent an AST +// element for a file whose source is not available. +type NoSourceNode struct { + filename string +} + +// NewNoSourceNode creates a new NoSourceNode for the given filename. 
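+//
+// For example (a sketch):
+//
+//	n := NewNoSourceNode("gen.proto")
+//	pos := n.NodeInfo(n).Start() // an unknown position: only the filename is set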
+func NewNoSourceNode(filename string) NoSourceNode { + return NoSourceNode{filename: filename} +} + +func (n NoSourceNode) Name() string { + return n.filename +} + +func (n NoSourceNode) Start() Token { + return 0 +} + +func (n NoSourceNode) End() Token { + return 0 +} + +func (n NoSourceNode) NodeInfo(Node) NodeInfo { + return NodeInfo{ + fileInfo: &FileInfo{name: n.filename}, + } +} + +func (n NoSourceNode) GetSyntax() Node { + return n +} + +func (n NoSourceNode) GetName() Node { + return n +} + +func (n NoSourceNode) GetValue() ValueNode { + return n +} + +func (n NoSourceNode) FieldLabel() Node { + return n +} + +func (n NoSourceNode) FieldName() Node { + return n +} + +func (n NoSourceNode) FieldType() Node { + return n +} + +func (n NoSourceNode) FieldTag() Node { + return n +} + +func (n NoSourceNode) FieldExtendee() Node { + return n +} + +func (n NoSourceNode) GetGroupKeyword() Node { + return n +} + +func (n NoSourceNode) GetOptions() *CompactOptionsNode { + return nil +} + +func (n NoSourceNode) RangeStart() Node { + return n +} + +func (n NoSourceNode) RangeEnd() Node { + return n +} + +func (n NoSourceNode) GetNumber() Node { + return n +} + +func (n NoSourceNode) MessageName() Node { + return n +} + +func (n NoSourceNode) GetInputType() Node { + return n +} + +func (n NoSourceNode) GetOutputType() Node { + return n +} + +func (n NoSourceNode) Value() interface{} { + return nil +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/node.go b/vendor/github.com/bufbuild/protocompile/ast/node.go new file mode 100644 index 00000000..63a76c79 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/node.go @@ -0,0 +1,139 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// Node is the interface implemented by all nodes in the AST. It +// provides information about the span of this AST node in terms +// of location in the source file. It also provides information +// about all prior comments (attached as leading comments) and +// optional subsequent comments (attached as trailing comments). +type Node interface { + Start() Token + End() Token +} + +// TerminalNode represents a leaf in the AST. These represent +// the items/lexemes in the protobuf language. Comments and +// whitespace are accumulated by the lexer and associated with +// the following lexed token. +type TerminalNode interface { + Node + Token() Token +} + +var _ TerminalNode = (*StringLiteralNode)(nil) +var _ TerminalNode = (*UintLiteralNode)(nil) +var _ TerminalNode = (*FloatLiteralNode)(nil) +var _ TerminalNode = (*IdentNode)(nil) +var _ TerminalNode = (*SpecialFloatLiteralNode)(nil) +var _ TerminalNode = (*KeywordNode)(nil) +var _ TerminalNode = (*RuneNode)(nil) + +// CompositeNode represents any non-terminal node in the tree. These +// are interior or root nodes and have child nodes. +type CompositeNode interface { + Node + // Children contains all AST nodes that are immediate children of this one. 
+ Children() []Node +} + +// terminalNode contains bookkeeping shared by all TerminalNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the TerminalNode +// interface. +type terminalNode Token + +func (n terminalNode) Start() Token { + return Token(n) +} + +func (n terminalNode) End() Token { + return Token(n) +} + +func (n terminalNode) Token() Token { + return Token(n) +} + +// compositeNode contains bookkeeping shared by all CompositeNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the CompositeNode +// interface. +type compositeNode struct { + children []Node +} + +func (n *compositeNode) Children() []Node { + return n.children +} + +func (n *compositeNode) Start() Token { + return n.children[0].Start() +} + +func (n *compositeNode) End() Token { + return n.children[len(n.children)-1].End() +} + +// RuneNode represents a single rune in protobuf source. Runes +// are typically collected into items, but some runes stand on +// their own, such as punctuation/symbols like commas, semicolons, +// equals signs, open and close symbols (braces, brackets, angles, +// and parentheses), and periods/dots. +// TODO: make this more compact; if runes don't have attributed comments +// then we don't need a Token to represent them and only need an offset +// into the file's contents. +type RuneNode struct { + terminalNode + Rune rune +} + +// NewRuneNode creates a new *RuneNode with the given properties. +func NewRuneNode(r rune, tok Token) *RuneNode { + return &RuneNode{ + terminalNode: tok.asTerminalNode(), + Rune: r, + } +} + +// EmptyDeclNode represents an empty declaration in protobuf source. +// These amount to extra semicolons, with no actual content preceding +// the semicolon. +type EmptyDeclNode struct { + compositeNode + Semicolon *RuneNode +} + +// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must +// be non-nil. +func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode { + if semicolon == nil { + panic("semicolon is nil") + } + return &EmptyDeclNode{ + compositeNode: compositeNode{ + children: []Node{semicolon}, + }, + Semicolon: semicolon, + } +} + +func (e *EmptyDeclNode) fileElement() {} +func (e *EmptyDeclNode) msgElement() {} +func (e *EmptyDeclNode) extendElement() {} +func (e *EmptyDeclNode) oneOfElement() {} +func (e *EmptyDeclNode) enumElement() {} +func (e *EmptyDeclNode) serviceElement() {} +func (e *EmptyDeclNode) methodElement() {} diff --git a/vendor/github.com/bufbuild/protocompile/ast/options.go b/vendor/github.com/bufbuild/protocompile/ast/options.go new file mode 100644 index 00000000..497ad44b --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/options.go @@ -0,0 +1,373 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// OptionDeclNode is a placeholder interface for AST nodes that represent +// options. 
This allows NoSourceNode to be used in place of *OptionNode +// for some usages. +type OptionDeclNode interface { + Node + GetName() Node + GetValue() ValueNode +} + +var _ OptionDeclNode = (*OptionNode)(nil) +var _ OptionDeclNode = NoSourceNode{} + +// OptionNode represents the declaration of a single option for an element. +// It is used both for normal option declarations (start with "option" keyword +// and end with semicolon) and for compact options found in fields, enum values, +// and extension ranges. Example: +// +// option (custom.option) = "foo"; +type OptionNode struct { + compositeNode + Keyword *KeywordNode // absent for compact options + Name *OptionNameNode + Equals *RuneNode + Val ValueNode + Semicolon *RuneNode // absent for compact options +} + +func (n *OptionNode) fileElement() {} +func (n *OptionNode) msgElement() {} +func (n *OptionNode) oneOfElement() {} +func (n *OptionNode) enumElement() {} +func (n *OptionNode) serviceElement() {} +func (n *OptionNode) methodElement() {} + +// NewOptionNode creates a new *OptionNode for a full option declaration (as +// used in files, messages, oneofs, enums, services, and methods). All arguments +// must be non-nil. (Also see NewCompactOptionNode.) +// - keyword: The token corresponding to the "option" keyword. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewOptionNode(keyword *KeywordNode, name *OptionNameNode, equals *RuneNode, val ValueNode, semicolon *RuneNode) *OptionNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if val == nil { + panic("val is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, equals, val, semicolon} + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Equals: equals, + Val: val, + Semicolon: semicolon, + } +} + +// NewCompactOptionNode creates a new *OptionNode for a full compact declaration +// (as used in fields, enum values, and extension ranges). All arguments must be +// non-nil. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +func NewCompactOptionNode(name *OptionNameNode, equals *RuneNode, val ValueNode) *OptionNode { + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if val == nil { + panic("val is nil") + } + children := []Node{name, equals, val} + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Val: val, + } +} + +func (n *OptionNode) GetName() Node { + return n.Name +} + +func (n *OptionNode) GetValue() ValueNode { + return n.Val +} + +// OptionNameNode represents an option name or even a traversal through message +// types to name a nested option field. Example: +// +// (foo.bar).baz.(bob) +type OptionNameNode struct { + compositeNode + Parts []*FieldReferenceNode + // Dots represent the separating '.' characters between name parts. 
The + // length of this slice must be exactly len(Parts)-1, each item in Parts + // having a corresponding item in this slice *except the last* (since a + // trailing dot is not allowed). + // + // These do *not* include dots that are inside of an extension name. For + // example: (foo.bar).baz.(bob) has three parts: + // 1. (foo.bar) - an extension name + // 2. baz - a regular field in foo.bar + // 3. (bob) - an extension field in baz + // Note that the dot in foo.bar will thus not be present in Dots but is + // instead in Parts[0]. + Dots []*RuneNode +} + +// NewOptionNameNode creates a new *OptionNameNode. The dots arg must have a +// length that is one less than the length of parts. The parts arg must not be +// empty. +func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNameNode { + if len(parts) == 0 { + panic("must have at least one part") + } + if len(dots) != len(parts)-1 { + panic(fmt.Sprintf("%d parts requires %d dots, not %d", len(parts), len(parts)-1, len(dots))) + } + children := make([]Node, 0, len(parts)*2-1) + for i, part := range parts { + if part == nil { + panic(fmt.Sprintf("parts[%d] is nil", i)) + } + if i > 0 { + if dots[i-1] == nil { + panic(fmt.Sprintf("dots[%d] is nil", i-1)) + } + children = append(children, dots[i-1]) + } + children = append(children, part) + } + return &OptionNameNode{ + compositeNode: compositeNode{ + children: children, + }, + Parts: parts, + Dots: dots, + } +} + +// FieldReferenceNode is a reference to a field name. It can indicate a regular +// field (simple unqualified name), an extension field (possibly-qualified name +// that is enclosed either in brackets or parentheses), or an "any" type +// reference (a type URL in the form "server.host/fully.qualified.Name" that is +// enclosed in brackets). +// +// Extension names are used in options to refer to custom options (which are +// actually extensions), in which case the name is enclosed in parentheses "(" +// and ")". They can also be used to refer to extension fields of options. +// +// Extension names are also used in message literals to set extension fields, +// in which case the name is enclosed in square brackets "[" and "]". +// +// "Any" type references can only be used in message literals, and are not +// allowed in option names. They are always enclosed in square brackets. An +// "any" type reference is distinguished from an extension name by the presence +// of a slash, which must be present in an "any" type reference and must be +// absent in an extension name. +// +// Examples: +// +// foobar +// (foo.bar) +// [foo.bar] +// [type.googleapis.com/foo.bar] +type FieldReferenceNode struct { + compositeNode + Open *RuneNode // only present for extension names and "any" type references + + // only present for "any" type references + URLPrefix IdentValueNode + Slash *RuneNode + + Name IdentValueNode + + Close *RuneNode // only present for extension names and "any" type references +} + +// NewFieldReferenceNode creates a new *FieldReferenceNode for a regular field. +// The name arg must not be nil. +func NewFieldReferenceNode(name *IdentNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + children := []Node{name} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + } +} + +// NewExtensionFieldReferenceNode creates a new *FieldReferenceNode for an +// extension field. All args must be non-nil. The openSym and closeSym runes +// should be "(" and ")" or "[" and "]". 
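+//
+// For instance (an editor's sketch; the token variables are hypothetical),
+// the extension reference "(foo.bar)" could be built as:
+//
+//	ref := NewExtensionFieldReferenceNode(openParen, fooBarIdent, closeParen)
+//	_ = ref.IsExtension() // true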
+func NewExtensionFieldReferenceNode(openSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode {
+	if name == nil {
+		panic("name is nil")
+	}
+	if openSym == nil {
+		panic("openSym is nil")
+	}
+	if closeSym == nil {
+		panic("closeSym is nil")
+	}
+	children := []Node{openSym, name, closeSym}
+	return &FieldReferenceNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Open:  openSym,
+		Name:  name,
+		Close: closeSym,
+	}
+}
+
+// NewAnyTypeReferenceNode creates a new *FieldReferenceNode for an "any"
+// type reference. All args must be non-nil. The openSym and closeSym runes
+// should be "[" and "]". The slashSym rune should be "/".
+func NewAnyTypeReferenceNode(openSym *RuneNode, urlPrefix IdentValueNode, slashSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode {
+	if name == nil {
+		panic("name is nil")
+	}
+	if openSym == nil {
+		panic("openSym is nil")
+	}
+	if closeSym == nil {
+		panic("closeSym is nil")
+	}
+	if urlPrefix == nil {
+		panic("urlPrefix is nil")
+	}
+	if slashSym == nil {
+		panic("slashSym is nil")
+	}
+	children := []Node{openSym, urlPrefix, slashSym, name, closeSym}
+	return &FieldReferenceNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Open:      openSym,
+		URLPrefix: urlPrefix,
+		Slash:     slashSym,
+		Name:      name,
+		Close:     closeSym,
+	}
+}
+
+// IsExtension reports if this is an extension name or not (e.g. enclosed in
+// punctuation, such as parentheses or brackets).
+func (a *FieldReferenceNode) IsExtension() bool {
+	return a.Open != nil && a.Slash == nil
+}
+
+// IsAnyTypeReference reports if this is an Any type reference.
+func (a *FieldReferenceNode) IsAnyTypeReference() bool {
+	return a.Slash != nil
+}
+
+func (a *FieldReferenceNode) Value() string {
+	if a.Open != nil {
+		if a.Slash != nil {
+			return string(a.Open.Rune) + string(a.URLPrefix.AsIdentifier()) + string(a.Slash.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune)
+		}
+		return string(a.Open.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune)
+	}
+	return string(a.Name.AsIdentifier())
+}
+
+// CompactOptionsNode represents a compact options declaration, as used with
+// fields, enum values, and extension ranges. Example:
+//
+// [deprecated = true, json_name = "foo_bar"]
+type CompactOptionsNode struct {
+	compositeNode
+	OpenBracket *RuneNode
+	Options     []*OptionNode
+	// Commas represent the separating ',' characters between options. The
+	// length of this slice must be exactly len(Options)-1, with each item
+	// in Options having a corresponding item in this slice *except the last*
+	// (since a trailing comma is not allowed).
+	Commas       []*RuneNode
+	CloseBracket *RuneNode
+}
+
+// NewCompactOptionsNode creates a *CompactOptionsNode. All args must be
+// non-nil. The commas arg must have a length that is one less than the
+// length of opts. The opts arg must not be empty.
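+//
+// For example (an editor's sketch with hypothetical tokens), the compact
+// options "[deprecated = true]" contain one option and therefore no commas:
+//
+//	node := NewCompactOptionsNode(openBracket, []*OptionNode{deprecatedOpt}, nil, closeBracket)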
+func NewCompactOptionsNode(openBracket *RuneNode, opts []*OptionNode, commas []*RuneNode, closeBracket *RuneNode) *CompactOptionsNode {
+	if openBracket == nil {
+		panic("openBracket is nil")
+	}
+	if closeBracket == nil {
+		panic("closeBracket is nil")
+	}
+	if len(opts) == 0 {
+		panic("must have at least one option")
+	}
+	if len(commas) != len(opts)-1 {
+		panic(fmt.Sprintf("%d opts requires %d commas, not %d", len(opts), len(opts)-1, len(commas)))
+	}
+	children := make([]Node, 0, len(opts)*2+1)
+	children = append(children, openBracket)
+	for i, opt := range opts {
+		if i > 0 {
+			if commas[i-1] == nil {
+				panic(fmt.Sprintf("commas[%d] is nil", i-1))
+			}
+			children = append(children, commas[i-1])
+		}
+		if opt == nil {
+			panic(fmt.Sprintf("opts[%d] is nil", i))
+		}
+		children = append(children, opt)
+	}
+	children = append(children, closeBracket)
+
+	return &CompactOptionsNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		OpenBracket:  openBracket,
+		Options:      opts,
+		Commas:       commas,
+		CloseBracket: closeBracket,
+	}
+}
+
+func (e *CompactOptionsNode) GetElements() []*OptionNode {
+	if e == nil {
+		return nil
+	}
+	return e.Options
+}
diff --git a/vendor/github.com/bufbuild/protocompile/ast/ranges.go b/vendor/github.com/bufbuild/protocompile/ast/ranges.go
new file mode 100644
index 00000000..453e5a06
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/ast/ranges.go
@@ -0,0 +1,319 @@
+// Copyright 2020-2022 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import "fmt"
+
+// ExtensionRangeNode represents an extension range declaration in an extendable
+// message. Example:
+//
+// extensions 100 to max;
+type ExtensionRangeNode struct {
+	compositeNode
+	Keyword *KeywordNode
+	Ranges  []*RangeNode
+	// Commas represent the separating ',' characters between ranges. The
+	// length of this slice must be exactly len(Ranges)-1, each item in Ranges
+	// having a corresponding item in this slice *except the last* (since a
+	// trailing comma is not allowed).
+	Commas    []*RuneNode
+	Options   *CompactOptionsNode
+	Semicolon *RuneNode
+}
+
+func (e *ExtensionRangeNode) msgElement() {}
+
+// NewExtensionRangeNode creates a new *ExtensionRangeNode. All args must be
+// non-nil except opts, which may be nil.
+// - keyword: The token corresponding to the "extensions" keyword.
+// - ranges: One or more range expressions.
+// - commas: Tokens that represent the "," runes that delimit the range expressions.
+//   The length of commas must be one less than the length of ranges.
+// - opts: The node corresponding to options that apply to each of the ranges.
+// - semicolon The token corresponding to the ";" rune that ends the declaration.
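+//
+// For example (an editor's sketch, tokens hypothetical), the declaration
+// "extensions 100 to max;" carries a single range, no commas, and no options:
+//
+//	node := NewExtensionRangeNode(extensionsKw, []*RangeNode{rng}, nil, nil, semi)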
+func NewExtensionRangeNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, opts *CompactOptionsNode, semicolon *RuneNode) *ExtensionRangeNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(ranges) == 0 { + panic("must have at least one range") + } + if len(commas) != len(ranges)-1 { + panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas))) + } + numChildren := len(ranges)*2 + 1 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + for i, rng := range ranges { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if rng == nil { + panic(fmt.Sprintf("ranges[%d] is nil", i)) + } + children = append(children, rng) + } + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + return &ExtensionRangeNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Ranges: ranges, + Commas: commas, + Options: opts, + Semicolon: semicolon, + } +} + +// RangeDeclNode is a placeholder interface for AST nodes that represent +// numeric values. This allows NoSourceNode to be used in place of *RangeNode +// for some usages. +type RangeDeclNode interface { + Node + RangeStart() Node + RangeEnd() Node +} + +var _ RangeDeclNode = (*RangeNode)(nil) +var _ RangeDeclNode = NoSourceNode{} + +// RangeNode represents a range expression, used in both extension ranges and +// reserved ranges. Example: +// +// 1000 to max +type RangeNode struct { + compositeNode + StartVal IntValueNode + // if To is non-nil, then exactly one of EndVal or Max must also be non-nil + To *KeywordNode + // EndVal and Max are mutually exclusive + EndVal IntValueNode + Max *KeywordNode +} + +// NewRangeNode creates a new *RangeNode. The start argument must be non-nil. +// The to argument represents the "to" keyword. If present (i.e. if it is non-nil), +// then so must be exactly one of end or max. If max is non-nil, it indicates a +// "100 to max" style range. But if end is non-nil, the end of the range is a +// literal, such as "100 to 200". 
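+//
+// Illustrative sketches (editor's examples, tokens hypothetical):
+//
+//	NewRangeNode(start, nil, nil, nil)   // "100"
+//	NewRangeNode(start, to, end, nil)    // "100 to 200"
+//	NewRangeNode(start, to, nil, maxKw)  // "100 to max"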
+func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, max *KeywordNode) *RangeNode {
+	if start == nil {
+		panic("start is nil")
+	}
+	numChildren := 1
+	if to != nil {
+		if end == nil && max == nil {
+			panic("to is not nil, but end and max both are")
+		}
+		if end != nil && max != nil {
+			panic("end and max cannot be both non-nil")
+		}
+		numChildren = 3
+	} else {
+		if end != nil {
+			panic("to is nil, but end is not")
+		}
+		if max != nil {
+			panic("to is nil, but max is not")
+		}
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, start)
+	if to != nil {
+		children = append(children, to)
+		if end != nil {
+			children = append(children, end)
+		} else {
+			children = append(children, max)
+		}
+	}
+	return &RangeNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		StartVal: start,
+		To:       to,
+		EndVal:   end,
+		Max:      max,
+	}
+}
+
+func (n *RangeNode) RangeStart() Node {
+	return n.StartVal
+}
+
+func (n *RangeNode) RangeEnd() Node {
+	if n.Max != nil {
+		return n.Max
+	}
+	if n.EndVal != nil {
+		return n.EndVal
+	}
+	return n.StartVal
+}
+
+func (n *RangeNode) StartValue() interface{} {
+	return n.StartVal.Value()
+}
+
+func (n *RangeNode) StartValueAsInt32(min, max int32) (int32, bool) {
+	return AsInt32(n.StartVal, min, max)
+}
+
+func (n *RangeNode) EndValue() interface{} {
+	if n.EndVal == nil {
+		return nil
+	}
+	return n.EndVal.Value()
+}
+
+func (n *RangeNode) EndValueAsInt32(min, max int32) (int32, bool) {
+	if n.Max != nil {
+		return max, true
+	}
+	if n.EndVal == nil {
+		return n.StartValueAsInt32(min, max)
+	}
+	return AsInt32(n.EndVal, min, max)
+}
+
+// ReservedNode represents a reserved declaration, which can be used to reserve
+// either names or numbers. Examples:
+//
+// reserved 1, 10-12, 15;
+// reserved "foo", "bar", "baz";
+type ReservedNode struct {
+	compositeNode
+	Keyword *KeywordNode
+	// If non-empty, this node represents reserved ranges and Names will be empty.
+	Ranges []*RangeNode
+	// If non-empty, this node represents reserved names and Ranges will be empty.
+	Names []StringValueNode
+	// Commas represent the separating ',' characters between ranges or names. The
+	// length of this slice must be exactly len(Ranges)-1 or len(Names)-1, depending
+	// on whether this node represents reserved ranges or reserved names. Each item
+	// in Ranges or Names has a corresponding item in this slice *except the last*
+	// (since a trailing comma is not allowed).
+	Commas    []*RuneNode
+	Semicolon *RuneNode
+}
+
+func (*ReservedNode) msgElement() {}
+func (*ReservedNode) enumElement() {}
+
+// NewReservedRangesNode creates a new *ReservedNode that represents reserved
+// numeric ranges. All args must be non-nil.
+// - keyword: The token corresponding to the "reserved" keyword.
+// - ranges: One or more range expressions.
+// - commas: Tokens that represent the "," runes that delimit the range expressions.
+//   The length of commas must be one less than the length of ranges.
+// - semicolon The token corresponding to the ";" rune that ends the declaration.
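+//
+// For example (an editor's sketch, tokens hypothetical), the declaration
+// "reserved 1, 10-12, 15;" has three ranges and two commas:
+//
+//	node := NewReservedRangesNode(kw, []*RangeNode{r1, r2, r3}, []*RuneNode{c1, c2}, semi)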
+func NewReservedRangesNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(ranges) == 0 { + panic("must have at least one range") + } + if len(commas) != len(ranges)-1 { + panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas))) + } + children := make([]Node, 0, len(ranges)*2+1) + children = append(children, keyword) + for i, rng := range ranges { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if rng == nil { + panic(fmt.Sprintf("ranges[%d] is nil", i)) + } + children = append(children, rng) + } + children = append(children, semicolon) + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Ranges: ranges, + Commas: commas, + Semicolon: semicolon, + } +} + +// NewReservedNamesNode creates a new *ReservedNode that represents reserved +// names. All args must be non-nil. +// - keyword: The token corresponding to the "reserved" keyword. +// - names: One or more names. +// - commas: Tokens that represent the "," runes that delimit the names. +// The length of commas must be one less than the length of names. +// - semicolon The token corresponding to the ";" rune that ends the declaration. +func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(names) == 0 { + panic("must have at least one name") + } + if len(commas) != len(names)-1 { + panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas))) + } + children := make([]Node, 0, len(names)*2+1) + children = append(children, keyword) + for i, name := range names { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if name == nil { + panic(fmt.Sprintf("names[%d] is nil", i)) + } + children = append(children, name) + } + children = append(children, semicolon) + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Names: names, + Commas: commas, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/service.go b/vendor/github.com/bufbuild/protocompile/ast/service.go new file mode 100644 index 00000000..c1807488 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/service.go @@ -0,0 +1,286 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// ServiceNode represents a service declaration. 
Example: +// +// service Foo { +// rpc Bar (Baz) returns (Bob); +// rpc Frobnitz (stream Parts) returns (Gyzmeaux); +// } +type ServiceNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []ServiceElement + CloseBrace *RuneNode +} + +func (*ServiceNode) fileElement() {} + +// NewServiceNode creates a new *ServiceNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "service" keyword. +// - name: The token corresponding to the service's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the service body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewServiceNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []ServiceElement, closeBrace *RuneNode) *ServiceNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *RPCNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ServiceElement type: %T", decl)) + } + children = append(children, decl) + } + children = append(children, closeBrace) + + return &ServiceNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +// ServiceElement is an interface implemented by all AST nodes that can +// appear in the body of a service declaration. +type ServiceElement interface { + Node + serviceElement() +} + +var _ ServiceElement = (*OptionNode)(nil) +var _ ServiceElement = (*RPCNode)(nil) +var _ ServiceElement = (*EmptyDeclNode)(nil) + +// RPCDeclNode is a placeholder interface for AST nodes that represent RPC +// declarations. This allows NoSourceNode to be used in place of *RPCNode +// for some usages. +type RPCDeclNode interface { + Node + GetName() Node + GetInputType() Node + GetOutputType() Node +} + +var _ RPCDeclNode = (*RPCNode)(nil) +var _ RPCDeclNode = NoSourceNode{} + +// RPCNode represents an RPC declaration. Example: +// +// rpc Foo (Bar) returns (Baz); +type RPCNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + Input *RPCTypeNode + Returns *KeywordNode + Output *RPCTypeNode + Semicolon *RuneNode + OpenBrace *RuneNode + Decls []RPCElement + CloseBrace *RuneNode +} + +func (n *RPCNode) serviceElement() {} + +// NewRPCNode creates a new *RPCNode with no body. All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. +// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
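+//
+// For example (an editor's sketch, tokens hypothetical), the method
+// "rpc Bar (Baz) returns (Bob);" is a body-less RPC:
+//
+//	rpc := NewRPCNode(rpcKw, barName, bazType, returnsKw, bobType, semi)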
+func NewRPCNode(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, semicolon *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, input, returns, output, semicolon} + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + Semicolon: semicolon, + } +} + +// NewRPCNodeWithBody creates a new *RPCNode that includes a body (and possibly +// options). All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. +// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the RPC body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewRPCNodeWithBody(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, openBrace *RuneNode, decls []RPCElement, closeBrace *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 7+len(decls)) + children = append(children, keyword, name, input, returns, output, openBrace) + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid RPCElement type: %T", decl)) + } + children = append(children, decl) + } + children = append(children, closeBrace) + + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +func (n *RPCNode) GetName() Node { + return n.Name +} + +func (n *RPCNode) GetInputType() Node { + return n.Input.MessageType +} + +func (n *RPCNode) GetOutputType() Node { + return n.Output.MessageType +} + +// RPCElement is an interface implemented by all AST nodes that can +// appear in the body of an rpc declaration (aka method). +type RPCElement interface { + Node + methodElement() +} + +var _ RPCElement = (*OptionNode)(nil) +var _ RPCElement = (*EmptyDeclNode)(nil) + +// RPCTypeNode represents the declaration of a request or response type for an +// RPC. Example: +// +// (stream foo.Bar) +type RPCTypeNode struct { + compositeNode + OpenParen *RuneNode + Stream *KeywordNode + MessageType IdentValueNode + CloseParen *RuneNode +} + +// NewRPCTypeNode creates a new *RPCTypeNode. All arguments must be non-nil +// except stream, which may be nil. +// - openParen: The token corresponding to the "(" rune that starts the declaration. 
+// - stream: The token corresponding to the "stream" keyword or nil if not present.
+// - msgType: The token corresponding to the message type's name.
+// - closeParen: The token corresponding to the ")" rune that ends the declaration.
+func NewRPCTypeNode(openParen *RuneNode, stream *KeywordNode, msgType IdentValueNode, closeParen *RuneNode) *RPCTypeNode {
+	if openParen == nil {
+		panic("openParen is nil")
+	}
+	if msgType == nil {
+		panic("msgType is nil")
+	}
+	if closeParen == nil {
+		panic("closeParen is nil")
+	}
+	var children []Node
+	if stream != nil {
+		children = []Node{openParen, stream, msgType, closeParen}
+	} else {
+		children = []Node{openParen, msgType, closeParen}
+	}
+
+	return &RPCTypeNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		OpenParen:   openParen,
+		Stream:      stream,
+		MessageType: msgType,
+		CloseParen:  closeParen,
+	}
+}
diff --git a/vendor/github.com/bufbuild/protocompile/ast/values.go b/vendor/github.com/bufbuild/protocompile/ast/values.go
new file mode 100644
index 00000000..b9f050aa
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/ast/values.go
@@ -0,0 +1,558 @@
+// Copyright 2020-2022 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+	"fmt"
+	"math"
+	"strings"
+)
+
+// ValueNode is an AST node that represents a literal value.
+//
+// It also includes references (e.g. IdentValueNode), which can be
+// used as values in some contexts, such as describing the default value
+// for a field, which can refer to an enum value.
+//
+// This also allows NoSourceNode to be used in place of a real value node
+// for some usages.
+type ValueNode interface {
+	Node
+	// Value returns a Go representation of the value. For scalars, this
+	// will be a string, int64, uint64, float64, or bool. This could also
+	// be an Identifier (e.g. IdentValueNodes). It can also be a composite
+	// literal:
+	// * For array literals, the type returned will be []ValueNode
+	// * For message literals, the type returned will be []*MessageFieldNode
+	Value() interface{}
+}
+
+var _ ValueNode = (*IdentNode)(nil)
+var _ ValueNode = (*CompoundIdentNode)(nil)
+var _ ValueNode = (*StringLiteralNode)(nil)
+var _ ValueNode = (*CompoundStringLiteralNode)(nil)
+var _ ValueNode = (*UintLiteralNode)(nil)
+var _ ValueNode = (*PositiveUintLiteralNode)(nil)
+var _ ValueNode = (*NegativeIntLiteralNode)(nil)
+var _ ValueNode = (*FloatLiteralNode)(nil)
+var _ ValueNode = (*SpecialFloatLiteralNode)(nil)
+var _ ValueNode = (*SignedFloatLiteralNode)(nil)
+var _ ValueNode = (*ArrayLiteralNode)(nil)
+var _ ValueNode = (*MessageLiteralNode)(nil)
+var _ ValueNode = NoSourceNode{}
+
+// StringValueNode is an AST node that represents a string literal.
+// Such a node can be a single literal (*StringLiteralNode) or a
+// concatenation of multiple literals (*CompoundStringLiteralNode).
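+//
+// Both forms report the same value through AsString; for example (an
+// editor's sketch, tokens hypothetical):
+//
+//	a := NewStringLiteralNode("foo", tok1)
+//	b := NewStringLiteralNode("bar", tok2)
+//	_ = NewCompoundLiteralStringNode(a, b).AsString() // "foobar"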
+type StringValueNode interface {
+	ValueNode
+	AsString() string
+}
+
+var _ StringValueNode = (*StringLiteralNode)(nil)
+var _ StringValueNode = (*CompoundStringLiteralNode)(nil)
+
+// StringLiteralNode represents a simple string literal. Example:
+//
+// "proto2"
+type StringLiteralNode struct {
+	terminalNode
+	// Val is the actual string value that the literal indicates.
+	Val string
+}
+
+// NewStringLiteralNode creates a new *StringLiteralNode with the given val.
+func NewStringLiteralNode(val string, tok Token) *StringLiteralNode {
+	return &StringLiteralNode{
+		terminalNode: tok.asTerminalNode(),
+		Val:          val,
+	}
+}
+
+func (n *StringLiteralNode) Value() interface{} {
+	return n.AsString()
+}
+
+func (n *StringLiteralNode) AsString() string {
+	return n.Val
+}
+
+// CompoundStringLiteralNode represents a compound string literal, which is
+// the concatenation of adjacent string literals. Example:
+//
+// "this " "is" " all one " "string"
+type CompoundStringLiteralNode struct {
+	compositeNode
+	Val string
+}
+
+// NewCompoundLiteralStringNode creates a new *CompoundStringLiteralNode that
+// consists of the given string components. The components argument may not be
+// empty.
+func NewCompoundLiteralStringNode(components ...*StringLiteralNode) *CompoundStringLiteralNode {
+	if len(components) == 0 {
+		panic("must have at least one component")
+	}
+	children := make([]Node, len(components))
+	var b strings.Builder
+	for i, comp := range components {
+		children[i] = comp
+		b.WriteString(comp.Val)
+	}
+	return &CompoundStringLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Val: b.String(),
+	}
+}
+
+func (n *CompoundStringLiteralNode) Value() interface{} {
+	return n.AsString()
+}
+
+func (n *CompoundStringLiteralNode) AsString() string {
+	return n.Val
+}
+
+// IntValueNode is an AST node that represents an integer literal. If
+// an integer literal is too large for an int64 (or uint64 for
+// positive literals), it is represented instead by a FloatValueNode.
+type IntValueNode interface {
+	ValueNode
+	AsInt64() (int64, bool)
+	AsUint64() (uint64, bool)
+}
+
+// AsInt32 range-checks the given int value and returns its value and true if
+// it is in the range, or 0 and false if it is outside the range.
+func AsInt32(n IntValueNode, min, max int32) (int32, bool) {
+	i, ok := n.AsInt64()
+	if !ok {
+		return 0, false
+	}
+	if i < int64(min) || i > int64(max) {
+		return 0, false
+	}
+	return int32(i), true
+}
+
+var _ IntValueNode = (*UintLiteralNode)(nil)
+var _ IntValueNode = (*PositiveUintLiteralNode)(nil)
+var _ IntValueNode = (*NegativeIntLiteralNode)(nil)
+
+// UintLiteralNode represents a simple integer literal with no sign character.
+type UintLiteralNode struct {
+	terminalNode
+	// Val is the numeric value indicated by the literal
+	Val uint64
+}
+
+// NewUintLiteralNode creates a new *UintLiteralNode with the given val.
+func NewUintLiteralNode(val uint64, tok Token) *UintLiteralNode {
+	return &UintLiteralNode{
+		terminalNode: tok.asTerminalNode(),
+		Val:          val,
+	}
+}
+
+func (n *UintLiteralNode) Value() interface{} {
+	return n.Val
+}
+
+func (n *UintLiteralNode) AsInt64() (int64, bool) {
+	if n.Val > math.MaxInt64 {
+		return 0, false
+	}
+	return int64(n.Val), true
+}
+
+func (n *UintLiteralNode) AsUint64() (uint64, bool) {
+	return n.Val, true
+}
+
+func (n *UintLiteralNode) AsFloat() float64 {
+	return float64(n.Val)
+}
+
+// PositiveUintLiteralNode represents an integer literal with a positive (+) sign.
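+// For example (an editor's sketch, tokens hypothetical), "+42" becomes:
+//
+//	n := NewPositiveUintLiteralNode(plusRune, NewUintLiteralNode(42, tok))
+//	v, ok := n.AsInt64() // 42, true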
+type PositiveUintLiteralNode struct { + compositeNode + Plus *RuneNode + Uint *UintLiteralNode + Val uint64 +} + +// NewPositiveUintLiteralNode creates a new *PositiveUintLiteralNode. Both +// arguments must be non-nil. +func NewPositiveUintLiteralNode(sign *RuneNode, i *UintLiteralNode) *PositiveUintLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &PositiveUintLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Plus: sign, + Uint: i, + Val: i.Val, + } +} + +func (n *PositiveUintLiteralNode) Value() interface{} { + return n.Val +} + +func (n *PositiveUintLiteralNode) AsInt64() (int64, bool) { + if n.Val > math.MaxInt64 { + return 0, false + } + return int64(n.Val), true +} + +func (n *PositiveUintLiteralNode) AsUint64() (uint64, bool) { + return n.Val, true +} + +// NegativeIntLiteralNode represents an integer literal with a negative (-) sign. +type NegativeIntLiteralNode struct { + compositeNode + Minus *RuneNode + Uint *UintLiteralNode + Val int64 +} + +// NewNegativeIntLiteralNode creates a new *NegativeIntLiteralNode. Both +// arguments must be non-nil. +func NewNegativeIntLiteralNode(sign *RuneNode, i *UintLiteralNode) *NegativeIntLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &NegativeIntLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Minus: sign, + Uint: i, + Val: -int64(i.Val), + } +} + +func (n *NegativeIntLiteralNode) Value() interface{} { + return n.Val +} + +func (n *NegativeIntLiteralNode) AsInt64() (int64, bool) { + return n.Val, true +} + +func (n *NegativeIntLiteralNode) AsUint64() (uint64, bool) { + if n.Val < 0 { + return 0, false + } + return uint64(n.Val), true +} + +// FloatValueNode is an AST node that represents a numeric literal with +// a floating point, in scientific notation, or too large to fit in an +// int64 or uint64. +type FloatValueNode interface { + ValueNode + AsFloat() float64 +} + +var _ FloatValueNode = (*FloatLiteralNode)(nil) +var _ FloatValueNode = (*SpecialFloatLiteralNode)(nil) +var _ FloatValueNode = (*UintLiteralNode)(nil) + +// FloatLiteralNode represents a floating point numeric literal. +type FloatLiteralNode struct { + terminalNode + // Val is the numeric value indicated by the literal + Val float64 +} + +// NewFloatLiteralNode creates a new *FloatLiteralNode with the given val. +func NewFloatLiteralNode(val float64, tok Token) *FloatLiteralNode { + return &FloatLiteralNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} + +func (n *FloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *FloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SpecialFloatLiteralNode represents a special floating point numeric literal +// for "inf" and "nan" values. +type SpecialFloatLiteralNode struct { + *KeywordNode + Val float64 +} + +// NewSpecialFloatLiteralNode returns a new *SpecialFloatLiteralNode for the +// given keyword, which must be "inf" or "nan". 
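+//
+// For example (an editor's illustration, keyword token hypothetical):
+//
+//	inf := NewSpecialFloatLiteralNode(infKw) // inf.AsFloat() == math.Inf(1)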
+func NewSpecialFloatLiteralNode(name *KeywordNode) *SpecialFloatLiteralNode { + var f float64 + if name.Val == "inf" { + f = math.Inf(1) + } else { + f = math.NaN() + } + return &SpecialFloatLiteralNode{ + KeywordNode: name, + Val: f, + } +} + +func (n *SpecialFloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *SpecialFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SignedFloatLiteralNode represents a signed floating point number. +type SignedFloatLiteralNode struct { + compositeNode + Sign *RuneNode + Float FloatValueNode + Val float64 +} + +// NewSignedFloatLiteralNode creates a new *SignedFloatLiteralNode. Both +// arguments must be non-nil. +func NewSignedFloatLiteralNode(sign *RuneNode, f FloatValueNode) *SignedFloatLiteralNode { + if sign == nil { + panic("sign is nil") + } + if f == nil { + panic("f is nil") + } + children := []Node{sign, f} + val := f.AsFloat() + if sign.Rune == '-' { + val = -val + } + return &SignedFloatLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Sign: sign, + Float: f, + Val: val, + } +} + +func (n *SignedFloatLiteralNode) Value() interface{} { + return n.Val +} + +func (n *SignedFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// ArrayLiteralNode represents an array literal, which is only allowed inside of +// a MessageLiteralNode, to indicate values for a repeated field. Example: +// +// ["foo", "bar", "baz"] +type ArrayLiteralNode struct { + compositeNode + OpenBracket *RuneNode + Elements []ValueNode + // Commas represent the separating ',' characters between elements. The + // length of this slice must be exactly len(Elements)-1, with each item + // in Elements having a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + CloseBracket *RuneNode +} + +// NewArrayLiteralNode creates a new *ArrayLiteralNode. The openBracket and +// closeBracket args must be non-nil and represent the "[" and "]" runes that +// surround the array values. The given commas arg must have a length that is +// one less than the length of the vals arg. However, vals may be empty, in +// which case commas must also be empty. +func NewArrayLiteralNode(openBracket *RuneNode, vals []ValueNode, commas []*RuneNode, closeBracket *RuneNode) *ArrayLiteralNode { + if openBracket == nil { + panic("openBracket is nil") + } + if closeBracket == nil { + panic("closeBracket is nil") + } + if len(vals) == 0 && len(commas) != 0 { + panic("vals is empty but commas is not") + } + if len(vals) > 0 && len(commas) != len(vals)-1 { + panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals)-1, len(commas))) + } + children := make([]Node, 0, len(vals)*2+1) + children = append(children, openBracket) + for i, val := range vals { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if val == nil { + panic(fmt.Sprintf("vals[%d] is nil", i)) + } + children = append(children, val) + } + children = append(children, closeBracket) + + return &ArrayLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenBracket: openBracket, + Elements: vals, + Commas: commas, + CloseBracket: closeBracket, + } +} + +func (n *ArrayLiteralNode) Value() interface{} { + return n.Elements +} + +// MessageLiteralNode represents a message literal, which is compatible with the +// protobuf text format and can be used for custom options with message types. 
+// Example:
+//
+// { foo:1 foo:2 foo:3 bar:<name:"abc" id:123> }
+type MessageLiteralNode struct {
+	compositeNode
+	Open     *RuneNode // should be '{' or '<'
+	Elements []*MessageFieldNode
+	// Separator characters between elements, which can be either ','
+	// or ';' if present. This slice must be exactly len(Elements) in
+	// length, with each item in Elements having one corresponding item
+	// in Seps. Separators in message literals are optional, so a given
+	// item in this slice may be nil to indicate absence of a separator.
+	Seps  []*RuneNode
+	Close *RuneNode // should be '}' or '>', depending on Open
+}
+
+// NewMessageLiteralNode creates a new *MessageLiteralNode. The openSym and
+// closeSym runes must not be nil and should be "{" and "}" or "<" and ">".
+//
+// Unlike separators (dots and commas) used for other AST nodes that represent
+// a list of elements, the seps arg must be the SAME length as vals, and it may
+// contain nil values to indicate absence of a separator (in fact, it could be
+// all nils).
+func NewMessageLiteralNode(openSym *RuneNode, vals []*MessageFieldNode, seps []*RuneNode, closeSym *RuneNode) *MessageLiteralNode {
+	if openSym == nil {
+		panic("openSym is nil")
+	}
+	if closeSym == nil {
+		panic("closeSym is nil")
+	}
+	if len(seps) != len(vals) {
+		panic(fmt.Sprintf("%d vals requires %d seps, not %d", len(vals), len(vals), len(seps)))
+	}
+	numChildren := len(vals) + 2
+	for _, sep := range seps {
+		if sep != nil {
+			numChildren++
+		}
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, openSym)
+	for i, val := range vals {
+		if val == nil {
+			panic(fmt.Sprintf("vals[%d] is nil", i))
+		}
+		children = append(children, val)
+		if seps[i] != nil {
+			children = append(children, seps[i])
+		}
+	}
+	children = append(children, closeSym)
+
+	return &MessageLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Open:     openSym,
+		Elements: vals,
+		Seps:     seps,
+		Close:    closeSym,
+	}
+}
+
+func (n *MessageLiteralNode) Value() interface{} {
+	return n.Elements
+}
+
+// MessageFieldNode represents a single field (name and value) inside of a
+// message literal. Example:
+//
+// foo:"bar"
+type MessageFieldNode struct {
+	compositeNode
+	Name *FieldReferenceNode
+	// Sep represents the ':' separator between the name and value. If
+	// the value is a message or list literal (and thus starts with '<',
+	// '{', or '['), then the separator may be omitted and this field may
+	// be nil.
+	Sep *RuneNode
+	Val ValueNode
+}
+
+// NewMessageFieldNode creates a new *MessageFieldNode. All args except sep
+// must be non-nil.
+func NewMessageFieldNode(name *FieldReferenceNode, sep *RuneNode, val ValueNode) *MessageFieldNode {
+	if name == nil {
+		panic("name is nil")
+	}
+	if val == nil {
+		panic("val is nil")
+	}
+	numChildren := 2
+	if sep != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, name)
+	if sep != nil {
+		children = append(children, sep)
+	}
+	children = append(children, val)
+
+	return &MessageFieldNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Name: name,
+		Sep:  sep,
+		Val:  val,
+	}
+}
diff --git a/vendor/github.com/bufbuild/protocompile/ast/walk.go b/vendor/github.com/bufbuild/protocompile/ast/walk.go
new file mode 100644
index 00000000..a969ed46
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/ast/walk.go
@@ -0,0 +1,910 @@
+// Copyright 2020-2022 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import "fmt"
+
+// Walk conducts a walk of the AST rooted at the given root using the
+// given visitor. It performs a "pre-order traversal", visiting a
+// given AST node before it visits that node's descendants.
+//
+// If a visitor returns an error while walking the tree, the entire
+// operation is aborted and that error is returned.
+func Walk(root Node, v Visitor, opts ...WalkOption) error {
+	var wOpts walkOptions
+	for _, opt := range opts {
+		opt(&wOpts)
+	}
+	return walk(root, v, wOpts)
+}
+
+// WalkOption represents an option used with the Walk function. These
+// allow optional before and after hooks to be invoked as each node in
+// the tree is visited.
+type WalkOption func(*walkOptions)
+
+type walkOptions struct {
+	before, after func(Node) error
+}
+
+// WithBefore returns a WalkOption that will cause the given function to be
+// invoked before a node is visited during a walk operation. If this hook
+// returns an error, the node is not visited and the walk operation is aborted.
+func WithBefore(fn func(Node) error) WalkOption {
+	return func(options *walkOptions) {
+		options.before = fn
+	}
+}
+
+// WithAfter returns a WalkOption that will cause the given function to be
+// invoked after a node (as well as any descendants) is visited during a walk
+// operation. If this hook returns an error, the walk operation is aborted.
+//
+// If the walk is aborted due to some other visitor or before hook returning an
+// error, the after hook is still called for all nodes that have been visited.
+// However, the walk operation fails with the first error it encountered, so any
+// error returned from an after hook is effectively ignored.
+func WithAfter(fn func(Node) error) WalkOption {
+	return func(options *walkOptions) {
+		options.after = fn
+	}
+}
+
+func walk(root Node, v Visitor, opts walkOptions) (err error) {
+	if opts.before != nil {
+		if err := opts.before(root); err != nil {
+			return err
+		}
+	}
+	if opts.after != nil {
+		defer func() {
+			if afterErr := opts.after(root); afterErr != nil {
+				// if another call already returned an error then we
+				// have to ignore the error from the after hook
+				if err == nil {
+					err = afterErr
+				}
+			}
+		}()
+	}
+
+	if err := Visit(root, v); err != nil {
+		return err
+	}
+
+	if comp, ok := root.(CompositeNode); ok {
+		for _, child := range comp.Children() {
+			if err := walk(child, v, opts); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Visit implements the double-dispatch idiom and visits the given node by
+// calling the appropriate method of the given visitor.
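+//
+// For example (an editor's illustration), visiting a *MessageNode m simply
+// dispatches to v.VisitMessageNode(m):
+//
+//	err := Visit(m, v)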
+func Visit(n Node, v Visitor) error {
+	switch n := n.(type) {
+	case *FileNode:
+		return v.VisitFileNode(n)
+	case *SyntaxNode:
+		return v.VisitSyntaxNode(n)
+	case *PackageNode:
+		return v.VisitPackageNode(n)
+	case *ImportNode:
+		return v.VisitImportNode(n)
+	case *OptionNode:
+		return v.VisitOptionNode(n)
+	case *OptionNameNode:
+		return v.VisitOptionNameNode(n)
+	case *FieldReferenceNode:
+		return v.VisitFieldReferenceNode(n)
+	case *CompactOptionsNode:
+		return v.VisitCompactOptionsNode(n)
+	case *MessageNode:
+		return v.VisitMessageNode(n)
+	case *ExtendNode:
+		return v.VisitExtendNode(n)
+	case *ExtensionRangeNode:
+		return v.VisitExtensionRangeNode(n)
+	case *ReservedNode:
+		return v.VisitReservedNode(n)
+	case *RangeNode:
+		return v.VisitRangeNode(n)
+	case *FieldNode:
+		return v.VisitFieldNode(n)
+	case *GroupNode:
+		return v.VisitGroupNode(n)
+	case *MapFieldNode:
+		return v.VisitMapFieldNode(n)
+	case *MapTypeNode:
+		return v.VisitMapTypeNode(n)
+	case *OneOfNode:
+		return v.VisitOneOfNode(n)
+	case *EnumNode:
+		return v.VisitEnumNode(n)
+	case *EnumValueNode:
+		return v.VisitEnumValueNode(n)
+	case *ServiceNode:
+		return v.VisitServiceNode(n)
+	case *RPCNode:
+		return v.VisitRPCNode(n)
+	case *RPCTypeNode:
+		return v.VisitRPCTypeNode(n)
+	case *IdentNode:
+		return v.VisitIdentNode(n)
+	case *CompoundIdentNode:
+		return v.VisitCompoundIdentNode(n)
+	case *StringLiteralNode:
+		return v.VisitStringLiteralNode(n)
+	case *CompoundStringLiteralNode:
+		return v.VisitCompoundStringLiteralNode(n)
+	case *UintLiteralNode:
+		return v.VisitUintLiteralNode(n)
+	case *PositiveUintLiteralNode:
+		return v.VisitPositiveUintLiteralNode(n)
+	case *NegativeIntLiteralNode:
+		return v.VisitNegativeIntLiteralNode(n)
+	case *FloatLiteralNode:
+		return v.VisitFloatLiteralNode(n)
+	case *SpecialFloatLiteralNode:
+		return v.VisitSpecialFloatLiteralNode(n)
+	case *SignedFloatLiteralNode:
+		return v.VisitSignedFloatLiteralNode(n)
+	case *ArrayLiteralNode:
+		return v.VisitArrayLiteralNode(n)
+	case *MessageLiteralNode:
+		return v.VisitMessageLiteralNode(n)
+	case *MessageFieldNode:
+		return v.VisitMessageFieldNode(n)
+	case *KeywordNode:
+		return v.VisitKeywordNode(n)
+	case *RuneNode:
+		return v.VisitRuneNode(n)
+	case *EmptyDeclNode:
+		return v.VisitEmptyDeclNode(n)
+	default:
+		panic(fmt.Sprintf("unexpected type of node: %T", n))
+	}
+}
+
+// AncestorTracker is used to track the path of nodes during a walk operation.
+// By passing AsWalkOptions to a call to Walk, a visitor can inspect the path to
+// the node being visited using this tracker.
+type AncestorTracker struct {
+	ancestors []Node
+}
+
+// AsWalkOptions returns WalkOption values that will cause this ancestor tracker
+// to track the path through the AST during the walk operation.
+func (t *AncestorTracker) AsWalkOptions() []WalkOption {
+	return []WalkOption{
+		WithBefore(func(n Node) error {
+			t.ancestors = append(t.ancestors, n)
+			return nil
+		}),
+		WithAfter(func(n Node) error {
+			t.ancestors = t.ancestors[:len(t.ancestors)-1]
+			return nil
+		}),
+	}
+}
+
+// Path returns a slice of nodes that represents the path from the root of the
+// walk operation to the currently visited node. The first element in the path
+// is the root supplied to Walk. The last element in the path is the currently
+// visited node.
+//
+// The returned slice is not a defensive copy, so callers should NOT mutate it.
+func (t *AncestorTracker) Path() []Node {
+	return t.ancestors
+}
+
+// Parent returns the parent node of the currently visited node.
If the node +// currently being visited is the root supplied to Walk then nil is returned. +func (t *AncestorTracker) Parent() Node { + if len(t.ancestors) <= 1 { + return nil + } + return t.ancestors[len(t.ancestors)-2] +} + +// VisitChildren visits all direct children of the given node using the given +// visitor. If visiting a child returns an error, that error is immediately +// returned, and other children will not be visited. +func VisitChildren(n CompositeNode, v Visitor) error { + for _, ch := range n.Children() { + if err := Visit(ch, v); err != nil { + return err + } + } + return nil +} + +// Visitor provides a technique for walking the AST that allows for +// dynamic dispatch, where a particular function is invoked based on +// the runtime type of the argument. +// +// It consists of a number of functions, each of which matches a +// concrete Node type. +// +// Most visitor implementations will either embed NoOpVisitor (so as +// not to have to implement *all* of the methods) or will be instances +// of SimpleVisitor. +// +// Visitors can be supplied to a Walk operation or passed to a call +// to Visit or VisitChildren. +type Visitor interface { + // VisitFileNode is invoked when visiting a *FileNode in the AST. + VisitFileNode(*FileNode) error + // VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST. + VisitSyntaxNode(*SyntaxNode) error + // VisitPackageNode is invoked when visiting a *PackageNode in the AST. + VisitPackageNode(*PackageNode) error + // VisitImportNode is invoked when visiting an *ImportNode in the AST. + VisitImportNode(*ImportNode) error + // VisitOptionNode is invoked when visiting an *OptionNode in the AST. + VisitOptionNode(*OptionNode) error + // VisitOptionNameNode is invoked when visiting an *OptionNameNode in the AST. + VisitOptionNameNode(*OptionNameNode) error + // VisitFieldReferenceNode is invoked when visiting a *FieldReferenceNode in the AST. + VisitFieldReferenceNode(*FieldReferenceNode) error + // VisitCompactOptionsNode is invoked when visiting a *CompactOptionsNode in the AST. + VisitCompactOptionsNode(*CompactOptionsNode) error + // VisitMessageNode is invoked when visiting a *MessageNode in the AST. + VisitMessageNode(*MessageNode) error + // VisitExtendNode is invoked when visiting an *ExtendNode in the AST. + VisitExtendNode(*ExtendNode) error + // VisitExtensionRangeNode is invoked when visiting an *ExtensionRangeNode in the AST. + VisitExtensionRangeNode(*ExtensionRangeNode) error + // VisitReservedNode is invoked when visiting a *ReservedNode in the AST. + VisitReservedNode(*ReservedNode) error + // VisitRangeNode is invoked when visiting a *RangeNode in the AST. + VisitRangeNode(*RangeNode) error + // VisitFieldNode is invoked when visiting a *FieldNode in the AST. + VisitFieldNode(*FieldNode) error + // VisitGroupNode is invoked when visiting a *GroupNode in the AST. + VisitGroupNode(*GroupNode) error + // VisitMapFieldNode is invoked when visiting a *MapFieldNode in the AST. + VisitMapFieldNode(*MapFieldNode) error + // VisitMapTypeNode is invoked when visiting a *MapTypeNode in the AST. + VisitMapTypeNode(*MapTypeNode) error + // VisitOneOfNode is invoked when visiting a *OneOfNode in the AST. + VisitOneOfNode(*OneOfNode) error + // VisitEnumNode is invoked when visiting an *EnumNode in the AST. + VisitEnumNode(*EnumNode) error + // VisitEnumValueNode is invoked when visiting an *EnumValueNode in the AST. 
+	VisitEnumValueNode(*EnumValueNode) error
+	// VisitServiceNode is invoked when visiting a *ServiceNode in the AST.
+	VisitServiceNode(*ServiceNode) error
+	// VisitRPCNode is invoked when visiting an *RPCNode in the AST.
+	VisitRPCNode(*RPCNode) error
+	// VisitRPCTypeNode is invoked when visiting an *RPCTypeNode in the AST.
+	VisitRPCTypeNode(*RPCTypeNode) error
+	// VisitIdentNode is invoked when visiting an *IdentNode in the AST.
+	VisitIdentNode(*IdentNode) error
+	// VisitCompoundIdentNode is invoked when visiting a *CompoundIdentNode in the AST.
+	VisitCompoundIdentNode(*CompoundIdentNode) error
+	// VisitStringLiteralNode is invoked when visiting a *StringLiteralNode in the AST.
+	VisitStringLiteralNode(*StringLiteralNode) error
+	// VisitCompoundStringLiteralNode is invoked when visiting a *CompoundStringLiteralNode in the AST.
+	VisitCompoundStringLiteralNode(*CompoundStringLiteralNode) error
+	// VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST.
+	VisitUintLiteralNode(*UintLiteralNode) error
+	// VisitPositiveUintLiteralNode is invoked when visiting a *PositiveUintLiteralNode in the AST.
+	VisitPositiveUintLiteralNode(*PositiveUintLiteralNode) error
+	// VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST.
+	VisitNegativeIntLiteralNode(*NegativeIntLiteralNode) error
+	// VisitFloatLiteralNode is invoked when visiting a *FloatLiteralNode in the AST.
+	VisitFloatLiteralNode(*FloatLiteralNode) error
+	// VisitSpecialFloatLiteralNode is invoked when visiting a *SpecialFloatLiteralNode in the AST.
+	VisitSpecialFloatLiteralNode(*SpecialFloatLiteralNode) error
+	// VisitSignedFloatLiteralNode is invoked when visiting a *SignedFloatLiteralNode in the AST.
+	VisitSignedFloatLiteralNode(*SignedFloatLiteralNode) error
+	// VisitArrayLiteralNode is invoked when visiting an *ArrayLiteralNode in the AST.
+	VisitArrayLiteralNode(*ArrayLiteralNode) error
+	// VisitMessageLiteralNode is invoked when visiting a *MessageLiteralNode in the AST.
+	VisitMessageLiteralNode(*MessageLiteralNode) error
+	// VisitMessageFieldNode is invoked when visiting a *MessageFieldNode in the AST.
+	VisitMessageFieldNode(*MessageFieldNode) error
+	// VisitKeywordNode is invoked when visiting a *KeywordNode in the AST.
+	VisitKeywordNode(*KeywordNode) error
+	// VisitRuneNode is invoked when visiting a *RuneNode in the AST.
+	VisitRuneNode(*RuneNode) error
+	// VisitEmptyDeclNode is invoked when visiting an *EmptyDeclNode in the AST.
+	VisitEmptyDeclNode(*EmptyDeclNode) error
+}
+
+// NoOpVisitor is a visitor implementation that does nothing. All methods
+// unconditionally return nil. This can be embedded into a struct to make that
+// struct implement the Visitor interface, and only the relevant visit methods
+// then need to be implemented on the struct.
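+//
+// As a hedged sketch from a client package (the enumCounter type and the
+// exact ast.Walk call are illustrative assumptions, not part of this file),
+// one might count enum declarations like so:
+//
+//	type enumCounter struct {
+//		ast.NoOpVisitor // provides no-op defaults for every Visit* method
+//		count int
+//	}
+//
+//	func (v *enumCounter) VisitEnumNode(*ast.EnumNode) error {
+//		v.count++
+//		return nil
+//	}
+//
+//	var v enumCounter
+//	err := ast.Walk(root, &v) // visits every node under root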
+type NoOpVisitor struct{} + +var _ Visitor = NoOpVisitor{} + +func (n NoOpVisitor) VisitFileNode(_ *FileNode) error { + return nil +} + +func (n NoOpVisitor) VisitSyntaxNode(_ *SyntaxNode) error { + return nil +} + +func (n NoOpVisitor) VisitPackageNode(_ *PackageNode) error { + return nil +} + +func (n NoOpVisitor) VisitImportNode(_ *ImportNode) error { + return nil +} + +func (n NoOpVisitor) VisitOptionNode(_ *OptionNode) error { + return nil +} + +func (n NoOpVisitor) VisitOptionNameNode(_ *OptionNameNode) error { + return nil +} + +func (n NoOpVisitor) VisitFieldReferenceNode(_ *FieldReferenceNode) error { + return nil +} + +func (n NoOpVisitor) VisitCompactOptionsNode(_ *CompactOptionsNode) error { + return nil +} + +func (n NoOpVisitor) VisitMessageNode(_ *MessageNode) error { + return nil +} + +func (n NoOpVisitor) VisitExtendNode(_ *ExtendNode) error { + return nil +} + +func (n NoOpVisitor) VisitExtensionRangeNode(_ *ExtensionRangeNode) error { + return nil +} + +func (n NoOpVisitor) VisitReservedNode(_ *ReservedNode) error { + return nil +} + +func (n NoOpVisitor) VisitRangeNode(_ *RangeNode) error { + return nil +} + +func (n NoOpVisitor) VisitFieldNode(_ *FieldNode) error { + return nil +} + +func (n NoOpVisitor) VisitGroupNode(_ *GroupNode) error { + return nil +} + +func (n NoOpVisitor) VisitMapFieldNode(_ *MapFieldNode) error { + return nil +} + +func (n NoOpVisitor) VisitMapTypeNode(_ *MapTypeNode) error { + return nil +} + +func (n NoOpVisitor) VisitOneOfNode(_ *OneOfNode) error { + return nil +} + +func (n NoOpVisitor) VisitEnumNode(_ *EnumNode) error { + return nil +} + +func (n NoOpVisitor) VisitEnumValueNode(_ *EnumValueNode) error { + return nil +} + +func (n NoOpVisitor) VisitServiceNode(_ *ServiceNode) error { + return nil +} + +func (n NoOpVisitor) VisitRPCNode(_ *RPCNode) error { + return nil +} + +func (n NoOpVisitor) VisitRPCTypeNode(_ *RPCTypeNode) error { + return nil +} + +func (n NoOpVisitor) VisitIdentNode(_ *IdentNode) error { + return nil +} + +func (n NoOpVisitor) VisitCompoundIdentNode(_ *CompoundIdentNode) error { + return nil +} + +func (n NoOpVisitor) VisitStringLiteralNode(_ *StringLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitCompoundStringLiteralNode(_ *CompoundStringLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitUintLiteralNode(_ *UintLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitPositiveUintLiteralNode(_ *PositiveUintLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitNegativeIntLiteralNode(_ *NegativeIntLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitFloatLiteralNode(_ *FloatLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitSpecialFloatLiteralNode(_ *SpecialFloatLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitSignedFloatLiteralNode(_ *SignedFloatLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitArrayLiteralNode(_ *ArrayLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitMessageLiteralNode(_ *MessageLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitMessageFieldNode(_ *MessageFieldNode) error { + return nil +} + +func (n NoOpVisitor) VisitKeywordNode(_ *KeywordNode) error { + return nil +} + +func (n NoOpVisitor) VisitRuneNode(_ *RuneNode) error { + return nil +} + +func (n NoOpVisitor) VisitEmptyDeclNode(_ *EmptyDeclNode) error { + return nil +} + +// SimpleVisitor is a visitor implementation that uses numerous function fields. 
+// If a relevant function field is not nil, then it will be invoked when a node +// is visited. +// +// In addition to a function for each concrete node type (and thus for each +// Visit* method of the Visitor interface), it also has function fields that +// accept interface types. So a visitor can, for example, easily treat all +// ValueNodes uniformly by providing a non-nil value for DoVisitValueNode +// instead of having to supply values for the various DoVisit*Node methods +// corresponding to all types that implement ValueNode. +// +// The most specific function provided that matches a given node is the one that +// will be invoked. For example, DoVisitStringValueNode will be called if +// present and applicable before DoVisitValueNode. Similarly, DoVisitValueNode +// would be called before DoVisitTerminalNode or DoVisitCompositeNode. The +// DoVisitNode is the most generic function and is called only if no more +// specific function is present for a given node type. +// +// The *UintLiteralNode type implements both IntValueNode and FloatValueNode. +// In this case, the DoVisitIntValueNode function is considered more specific +// than DoVisitFloatValueNode, so will be preferred if present. +// +// Similarly, *MapFieldNode and *GroupNode implement both FieldDeclNode and +// MessageDeclNode. In this case, the DoVisitFieldDeclNode function is +// treated as more specific than DoVisitMessageDeclNode, so will be preferred +// if both are present. +type SimpleVisitor struct { + DoVisitFileNode func(*FileNode) error + DoVisitSyntaxNode func(*SyntaxNode) error + DoVisitPackageNode func(*PackageNode) error + DoVisitImportNode func(*ImportNode) error + DoVisitOptionNode func(*OptionNode) error + DoVisitOptionNameNode func(*OptionNameNode) error + DoVisitFieldReferenceNode func(*FieldReferenceNode) error + DoVisitCompactOptionsNode func(*CompactOptionsNode) error + DoVisitMessageNode func(*MessageNode) error + DoVisitExtendNode func(*ExtendNode) error + DoVisitExtensionRangeNode func(*ExtensionRangeNode) error + DoVisitReservedNode func(*ReservedNode) error + DoVisitRangeNode func(*RangeNode) error + DoVisitFieldNode func(*FieldNode) error + DoVisitGroupNode func(*GroupNode) error + DoVisitMapFieldNode func(*MapFieldNode) error + DoVisitMapTypeNode func(*MapTypeNode) error + DoVisitOneOfNode func(*OneOfNode) error + DoVisitEnumNode func(*EnumNode) error + DoVisitEnumValueNode func(*EnumValueNode) error + DoVisitServiceNode func(*ServiceNode) error + DoVisitRPCNode func(*RPCNode) error + DoVisitRPCTypeNode func(*RPCTypeNode) error + DoVisitIdentNode func(*IdentNode) error + DoVisitCompoundIdentNode func(*CompoundIdentNode) error + DoVisitStringLiteralNode func(*StringLiteralNode) error + DoVisitCompoundStringLiteralNode func(*CompoundStringLiteralNode) error + DoVisitUintLiteralNode func(*UintLiteralNode) error + DoVisitPositiveUintLiteralNode func(*PositiveUintLiteralNode) error + DoVisitNegativeIntLiteralNode func(*NegativeIntLiteralNode) error + DoVisitFloatLiteralNode func(*FloatLiteralNode) error + DoVisitSpecialFloatLiteralNode func(*SpecialFloatLiteralNode) error + DoVisitSignedFloatLiteralNode func(*SignedFloatLiteralNode) error + DoVisitArrayLiteralNode func(*ArrayLiteralNode) error + DoVisitMessageLiteralNode func(*MessageLiteralNode) error + DoVisitMessageFieldNode func(*MessageFieldNode) error + DoVisitKeywordNode func(*KeywordNode) error + DoVisitRuneNode func(*RuneNode) error + DoVisitEmptyDeclNode func(*EmptyDeclNode) error + + DoVisitFieldDeclNode func(FieldDeclNode) error + 
DoVisitMessageDeclNode func(MessageDeclNode) error + + DoVisitIdentValueNode func(IdentValueNode) error + DoVisitStringValueNode func(StringValueNode) error + DoVisitIntValueNode func(IntValueNode) error + DoVisitFloatValueNode func(FloatValueNode) error + DoVisitValueNode func(ValueNode) error + + DoVisitTerminalNode func(TerminalNode) error + DoVisitCompositeNode func(CompositeNode) error + DoVisitNode func(Node) error +} + +var _ Visitor = (*SimpleVisitor)(nil) + +func (b *SimpleVisitor) visitInterface(node Node) error { + switch n := node.(type) { + case FieldDeclNode: + if b.DoVisitFieldDeclNode != nil { + return b.DoVisitFieldDeclNode(n) + } + // *MapFieldNode and *GroupNode both implement both FieldDeclNode and + // MessageDeclNode, so handle other case here + if fn, ok := n.(MessageDeclNode); ok && b.DoVisitMessageDeclNode != nil { + return b.DoVisitMessageDeclNode(fn) + } + case MessageDeclNode: + if b.DoVisitMessageDeclNode != nil { + return b.DoVisitMessageDeclNode(n) + } + case IdentValueNode: + if b.DoVisitIdentValueNode != nil { + return b.DoVisitIdentValueNode(n) + } + case StringValueNode: + if b.DoVisitStringValueNode != nil { + return b.DoVisitStringValueNode(n) + } + case IntValueNode: + if b.DoVisitIntValueNode != nil { + return b.DoVisitIntValueNode(n) + } + // *UintLiteralNode implements both IntValueNode and FloatValueNode, + // so handle other case here + if fn, ok := n.(FloatValueNode); ok && b.DoVisitFloatValueNode != nil { + return b.DoVisitFloatValueNode(fn) + } + case FloatValueNode: + if b.DoVisitFloatValueNode != nil { + return b.DoVisitFloatValueNode(n) + } + } + + if n, ok := node.(ValueNode); ok && b.DoVisitValueNode != nil { + return b.DoVisitValueNode(n) + } + + switch n := node.(type) { + case TerminalNode: + if b.DoVisitTerminalNode != nil { + return b.DoVisitTerminalNode(n) + } + case CompositeNode: + if b.DoVisitCompositeNode != nil { + return b.DoVisitCompositeNode(n) + } + } + + if b.DoVisitNode != nil { + return b.DoVisitNode(node) + } + + return nil +} + +func (b *SimpleVisitor) VisitFileNode(node *FileNode) error { + if b.DoVisitFileNode != nil { + return b.DoVisitFileNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitSyntaxNode(node *SyntaxNode) error { + if b.DoVisitSyntaxNode != nil { + return b.DoVisitSyntaxNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitPackageNode(node *PackageNode) error { + if b.DoVisitPackageNode != nil { + return b.DoVisitPackageNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitImportNode(node *ImportNode) error { + if b.DoVisitImportNode != nil { + return b.DoVisitImportNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitOptionNode(node *OptionNode) error { + if b.DoVisitOptionNode != nil { + return b.DoVisitOptionNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitOptionNameNode(node *OptionNameNode) error { + if b.DoVisitOptionNameNode != nil { + return b.DoVisitOptionNameNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitFieldReferenceNode(node *FieldReferenceNode) error { + if b.DoVisitFieldReferenceNode != nil { + return b.DoVisitFieldReferenceNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitCompactOptionsNode(node *CompactOptionsNode) error { + if b.DoVisitCompactOptionsNode != nil { + return b.DoVisitCompactOptionsNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) 
VisitMessageNode(node *MessageNode) error { + if b.DoVisitMessageNode != nil { + return b.DoVisitMessageNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitExtendNode(node *ExtendNode) error { + if b.DoVisitExtendNode != nil { + return b.DoVisitExtendNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitExtensionRangeNode(node *ExtensionRangeNode) error { + if b.DoVisitExtensionRangeNode != nil { + return b.DoVisitExtensionRangeNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitReservedNode(node *ReservedNode) error { + if b.DoVisitReservedNode != nil { + return b.DoVisitReservedNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitRangeNode(node *RangeNode) error { + if b.DoVisitRangeNode != nil { + return b.DoVisitRangeNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitFieldNode(node *FieldNode) error { + if b.DoVisitFieldNode != nil { + return b.DoVisitFieldNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitGroupNode(node *GroupNode) error { + if b.DoVisitGroupNode != nil { + return b.DoVisitGroupNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitMapFieldNode(node *MapFieldNode) error { + if b.DoVisitMapFieldNode != nil { + return b.DoVisitMapFieldNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitMapTypeNode(node *MapTypeNode) error { + if b.DoVisitMapTypeNode != nil { + return b.DoVisitMapTypeNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitOneOfNode(node *OneOfNode) error { + if b.DoVisitOneOfNode != nil { + return b.DoVisitOneOfNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitEnumNode(node *EnumNode) error { + if b.DoVisitEnumNode != nil { + return b.DoVisitEnumNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitEnumValueNode(node *EnumValueNode) error { + if b.DoVisitEnumValueNode != nil { + return b.DoVisitEnumValueNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitServiceNode(node *ServiceNode) error { + if b.DoVisitServiceNode != nil { + return b.DoVisitServiceNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitRPCNode(node *RPCNode) error { + if b.DoVisitRPCNode != nil { + return b.DoVisitRPCNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitRPCTypeNode(node *RPCTypeNode) error { + if b.DoVisitRPCTypeNode != nil { + return b.DoVisitRPCTypeNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitIdentNode(node *IdentNode) error { + if b.DoVisitIdentNode != nil { + return b.DoVisitIdentNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitCompoundIdentNode(node *CompoundIdentNode) error { + if b.DoVisitCompoundIdentNode != nil { + return b.DoVisitCompoundIdentNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitStringLiteralNode(node *StringLiteralNode) error { + if b.DoVisitStringLiteralNode != nil { + return b.DoVisitStringLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitCompoundStringLiteralNode(node *CompoundStringLiteralNode) error { + if b.DoVisitCompoundStringLiteralNode != nil { + return b.DoVisitCompoundStringLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitUintLiteralNode(node 
*UintLiteralNode) error { + if b.DoVisitUintLiteralNode != nil { + return b.DoVisitUintLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitPositiveUintLiteralNode(node *PositiveUintLiteralNode) error { + if b.DoVisitPositiveUintLiteralNode != nil { + return b.DoVisitPositiveUintLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitNegativeIntLiteralNode(node *NegativeIntLiteralNode) error { + if b.DoVisitNegativeIntLiteralNode != nil { + return b.DoVisitNegativeIntLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitFloatLiteralNode(node *FloatLiteralNode) error { + if b.DoVisitFloatLiteralNode != nil { + return b.DoVisitFloatLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitSpecialFloatLiteralNode(node *SpecialFloatLiteralNode) error { + if b.DoVisitSpecialFloatLiteralNode != nil { + return b.DoVisitSpecialFloatLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitSignedFloatLiteralNode(node *SignedFloatLiteralNode) error { + if b.DoVisitSignedFloatLiteralNode != nil { + return b.DoVisitSignedFloatLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitArrayLiteralNode(node *ArrayLiteralNode) error { + if b.DoVisitArrayLiteralNode != nil { + return b.DoVisitArrayLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitMessageLiteralNode(node *MessageLiteralNode) error { + if b.DoVisitMessageLiteralNode != nil { + return b.DoVisitMessageLiteralNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitMessageFieldNode(node *MessageFieldNode) error { + if b.DoVisitMessageFieldNode != nil { + return b.DoVisitMessageFieldNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitKeywordNode(node *KeywordNode) error { + if b.DoVisitKeywordNode != nil { + return b.DoVisitKeywordNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitRuneNode(node *RuneNode) error { + if b.DoVisitRuneNode != nil { + return b.DoVisitRuneNode(node) + } + return b.visitInterface(node) +} + +func (b *SimpleVisitor) VisitEmptyDeclNode(node *EmptyDeclNode) error { + if b.DoVisitEmptyDeclNode != nil { + return b.DoVisitEmptyDeclNode(node) + } + return b.visitInterface(node) +} diff --git a/vendor/github.com/bufbuild/protocompile/compiler.go b/vendor/github.com/bufbuild/protocompile/compiler.go new file mode 100644 index 00000000..0bb7f182 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/compiler.go @@ -0,0 +1,648 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
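+
+// As an illustrative sketch (the compiler value, ctx, and file name are
+// assumptions, not part of this file), a caller can recover panic details
+// from a failed compile via errors.As, since an error returned by Compile
+// may wrap the PanicError type defined below:
+//
+//	files, err := compiler.Compile(ctx, "foo.proto")
+//	var panicErr protocompile.PanicError
+//	if errors.As(err, &panicErr) {
+//		log.Printf("resolver panicked on %s: %v\n%s",
+//			panicErr.File, panicErr.Value, panicErr.Stack)
+//	}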
+
+package protocompile
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"runtime"
+	"runtime/debug"
+	"strings"
+	"sync"
+
+	"golang.org/x/sync/semaphore"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/bufbuild/protocompile/ast"
+	"github.com/bufbuild/protocompile/linker"
+	"github.com/bufbuild/protocompile/options"
+	"github.com/bufbuild/protocompile/parser"
+	"github.com/bufbuild/protocompile/reporter"
+	"github.com/bufbuild/protocompile/sourceinfo"
+)
+
+// Compiler handles compilation tasks, to turn protobuf source files, or other
+// intermediate representations, into fully linked descriptors.
+//
+// The compilation process involves five steps for each protobuf source file:
+// 1. Parsing the source into an AST (abstract syntax tree).
+// 2. Converting the AST into descriptor protos.
+// 3. Linking descriptor protos into fully linked descriptors.
+// 4. Interpreting options.
+// 5. Computing source code information.
+//
+// With fully linked descriptors, code generators and protoc plugins could be
+// invoked (though that step is not implemented by this package and not a
+// responsibility of this type).
+type Compiler struct {
+	// Resolves path/file names into source code or intermediate representations
+	// for protobuf source files. This is how the compiler loads the files to
+	// be compiled as well as all dependencies. This field is the only required
+	// field.
+	Resolver Resolver
+	// The maximum parallelism to use when compiling. If unspecified or set to
+	// a non-positive value, then min(runtime.NumCPU(), runtime.GOMAXPROCS(-1))
+	// will be used.
+	MaxParallelism int
+	// A custom error and warning reporter. If unspecified, a default reporter
+	// is used. The default reporter fails the compilation after encountering
+	// any error and ignores all warnings.
+	Reporter reporter.Reporter
+
+	// If unspecified or set to SourceInfoNone, source code information will not
+	// be included in the resulting descriptors. Source code information is
+	// metadata in the file descriptor that provides position information (i.e.
+	// the line and column where file elements were defined) as well as comments.
+	//
+	// If set to SourceInfoStandard, normal source code information will be
+	// included in the resulting descriptors. This matches the output of protoc
+	// (the reference compiler for Protocol Buffers). If set to
+	// SourceInfoExtraComments, the resulting descriptor will attempt to preserve
+	// as many comments as possible, for all elements in the file, not just for
+	// complete declarations.
+	//
+	// If Resolver returns descriptors or descriptor protos for a file, then
+	// those descriptors will not be modified. If they do not already include
+	// source code info, they will be left that way when the compile operation
+	// concludes. Similarly, if they already have source code info but this mode
+	// is set to SourceInfoNone, the existing info will be left in place.
+	SourceInfoMode SourceInfoMode
+
+	// If true, ASTs are retained in compilation results for which an AST was
+	// constructed. So any linker.Result value in the resulting compiled files
+	// will have an AST, in addition to descriptors. If left false, the AST
+	// will be removed as soon as it's no longer needed. This can help reduce
+	// total memory usage for operations involving a large number of files.
+	RetainASTs bool
+}
+
+// SourceInfoMode indicates how source code info is generated by a Compiler.
+type SourceInfoMode int
+
+const (
+	// SourceInfoNone indicates that no source code info is generated.
+	SourceInfoNone = SourceInfoMode(iota)
+	// SourceInfoStandard indicates that the standard source code info is
+	// generated, which includes comments only for complete declarations.
+	SourceInfoStandard
+	// SourceInfoExtraComments indicates that source code info is generated
+	// and will include comments for all elements (more comments than would
+	// be found in a descriptor produced by protoc).
+	SourceInfoExtraComments
+)
+
+// Compile compiles the given file names into fully-linked descriptors. The
+// compiler's resolver is used to locate source code (or intermediate artifacts
+// such as parsed ASTs or descriptor protos) and then do what is necessary to
+// transform that into descriptors (parsing, linking, etc.).
+//
+// Elements in the returned files will implement [linker.Result] if the
+// compiler had to link them (i.e. the resolver provided either a descriptor
+// proto or source code). Such a result will contain a full AST for the file if
+// the compiler had to parse it (i.e. the resolver provided source code for
+// that file).
+func (c *Compiler) Compile(ctx context.Context, files ...string) (linker.Files, error) {
+	if len(files) == 0 {
+		return nil, nil
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	par := c.MaxParallelism
+	if par <= 0 {
+		par = runtime.GOMAXPROCS(-1)
+		cpus := runtime.NumCPU()
+		if par > cpus {
+			par = cpus
+		}
+	}
+
+	h := reporter.NewHandler(c.Reporter)
+
+	e := executor{
+		c:       c,
+		h:       h,
+		s:       semaphore.NewWeighted(int64(par)),
+		cancel:  cancel,
+		sym:     &linker.Symbols{},
+		results: map[string]*result{},
+	}
+
+	// We lock now and create all tasks under lock to make sure that no
+	// async task can create a duplicate result. For example, if files
+	// contains both "foo.proto" and "bar.proto", then there is a race
+	// after we start compiling "foo.proto" between this loop and the
+	// async compilation task to create the result for "bar.proto". But
+	// we need to know if the file is directly requested for compilation,
+	// so we need this loop to define the result. So this loop holds the
+	// lock the whole time so async tasks can't create a result first.
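+	//
+	// Each *result created below is memoized per file name in e.results; its
+	// ready channel is closed by fail() or complete() when the background
+	// task finishes, which is what the wait loop below selects on.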
+ results := make([]*result, len(files)) + func() { + e.mu.Lock() + defer e.mu.Unlock() + for i, f := range files { + results[i] = e.compileLocked(ctx, f, true) + } + }() + + descs := make([]linker.File, len(files)) + var firstError error + for i, r := range results { + select { + case <-r.ready: + case <-ctx.Done(): + return nil, ctx.Err() + } + if r.err != nil { + if firstError == nil { + firstError = r.err + } + } + descs[i] = r.res + } + + if err := h.Error(); err != nil { + return descs, err + } + // this should probably never happen; if any task returned an + // error, h.Error() should be non-nil + return descs, firstError +} + +type result struct { + name string + ready chan struct{} + + // true if this file was explicitly provided to the compiler; otherwise + // this file is an import that is implicitly included + explicitFile bool + + // produces a linker.File or error, only available when ready is closed + res linker.File + err error + + mu sync.Mutex + // the results that are dependencies of this result; this result is + // blocked, waiting on these dependencies to complete + blockedOn []string +} + +func (r *result) fail(err error) { + r.err = err + close(r.ready) +} + +func (r *result) complete(f linker.File) { + r.res = f + close(r.ready) +} + +func (r *result) setBlockedOn(deps []string) { + r.mu.Lock() + defer r.mu.Unlock() + r.blockedOn = deps +} + +func (r *result) getBlockedOn() []string { + r.mu.Lock() + defer r.mu.Unlock() + return r.blockedOn +} + +type executor struct { + c *Compiler + h *reporter.Handler + s *semaphore.Weighted + cancel context.CancelFunc + sym *linker.Symbols + + descriptorProtoCheck sync.Once + descriptorProtoIsCustom bool + + mu sync.Mutex + results map[string]*result +} + +func (e *executor) compile(ctx context.Context, file string) *result { + e.mu.Lock() + defer e.mu.Unlock() + + return e.compileLocked(ctx, file, false) +} + +func (e *executor) compileLocked(ctx context.Context, file string, explicitFile bool) *result { + r := e.results[file] + if r != nil { + return r + } + + r = &result{ + name: file, + ready: make(chan struct{}), + explicitFile: explicitFile, + } + e.results[file] = r + go func() { + defer func() { + if p := recover(); p != nil { + if r.err == nil { + // TODO: strip top frames from stack trace so that the panic is + // the top of the trace? + panicErr := PanicError{File: file, Value: p, Stack: string(debug.Stack())} + r.fail(panicErr) + } + // TODO: if r.err != nil, then this task has already + // failed and there's nothing we can really do to + // communicate this panic to parent goroutine. This + // means the panic must have happened *after* the + // failure was already recorded (or during?) + // It would be nice to do something else here, like + // send the compiler an out-of-band error? Or log? + } + }() + e.doCompile(ctx, file, r) + }() + return r +} + +// PanicError is an error value that represents a recovered panic. It includes +// the value returned by recover() as well as the stack trace. +// +// This should generally only be seen if a Resolver implementation panics. +// +// An error returned by a Compiler may wrap a PanicError, so you may need to +// use errors.As(...) to access panic details. +type PanicError struct { + // The file that was being processed when the panic occurred + File string + // The value returned by recover() + Value interface{} + // A formatted stack trace + Stack string +} + +// Error implements the error interface. It does NOT include the stack trace. 
+// Use a type assertion and query the Stack field directly to access that.
+func (p PanicError) Error() string {
+	return fmt.Sprintf("panic handling %q: %v", p.File, p.Value)
+}
+
+type errFailedToResolve struct {
+	err  error
+	path string
+}
+
+func (e errFailedToResolve) Error() string {
+	errMsg := e.err.Error()
+	if strings.Contains(errMsg, e.path) {
+		// underlying error already refers to path in question, so we don't need to add more context
+		return errMsg
+	}
+	return fmt.Sprintf("could not resolve path %q: %s", e.path, e.err.Error())
+}
+
+func (e errFailedToResolve) Unwrap() error {
+	return e.err
+}
+
+func (e *executor) hasOverrideDescriptorProto() bool {
+	e.descriptorProtoCheck.Do(func() {
+		defer func() {
+			// ignore a panic here; just assume no custom descriptor.proto
+			_ = recover()
+		}()
+		res, err := e.c.Resolver.FindFileByPath(descriptorProtoPath)
+		e.descriptorProtoIsCustom = err == nil && res.Desc != standardImports[descriptorProtoPath]
+	})
+	return e.descriptorProtoIsCustom
+}
+
+func (e *executor) doCompile(ctx context.Context, file string, r *result) {
+	t := task{e: e, h: e.h.SubHandler(), r: r}
+	if err := e.s.Acquire(ctx, 1); err != nil {
+		r.fail(err)
+		return
+	}
+	defer t.release()
+
+	sr, err := e.c.Resolver.FindFileByPath(file)
+	if err != nil {
+		r.fail(errFailedToResolve{err: err, path: file})
+		return
+	}
+
+	defer func() {
+		// if the search result included a source, don't leave it open if it can be closed
+		if sr.Source == nil {
+			return
+		}
+		if c, ok := sr.Source.(io.Closer); ok {
+			_ = c.Close()
+		}
+	}()
+
+	desc, err := t.asFile(ctx, file, sr)
+	if err != nil {
+		r.fail(err)
+		return
+	}
+	r.complete(desc)
+}
+
+// A compilation task. The executor has a semaphore that limits the number
+// of concurrent, running tasks.
+type task struct {
+	e *executor
+
+	// handler for this task
+	h *reporter.Handler
+
+	// If true, this task needs to acquire a semaphore permit before running.
+	// If false, this task needs to release its semaphore permit on completion.
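+	// (released is set to true whenever the permit has been handed back,
+	// either mid-wait while this task is blocked on its dependencies or
+	// via release(), which also makes release() idempotent.)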
+ released bool + + // the result that is populated by this task + r *result +} + +func (t *task) release() { + if !t.released { + t.e.s.Release(1) + t.released = true + } +} + +const descriptorProtoPath = "google/protobuf/descriptor.proto" + +func (t *task) asFile(ctx context.Context, name string, r SearchResult) (linker.File, error) { + if r.Desc != nil { + if r.Desc.Path() != name { + return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.Desc.Path()) + } + return linker.NewFileRecursive(r.Desc) + } + + parseRes, err := t.asParseResult(name, r) + if err != nil { + return nil, err + } + if linkRes, ok := parseRes.(linker.Result); ok { + // if resolver returned a parse result that was actually a link result, + // use the link result directly (no other steps needed) + return linkRes, nil + } + + var deps []linker.File + fileDescriptorProto := parseRes.FileDescriptorProto() + var wantsDescriptorProto bool + imports := fileDescriptorProto.Dependency + + if t.e.hasOverrideDescriptorProto() { + // we only consider implicitly including descriptor.proto if it's overridden + if name != descriptorProtoPath { + var includesDescriptorProto bool + for _, dep := range fileDescriptorProto.Dependency { + if dep == descriptorProtoPath { + includesDescriptorProto = true + break + } + } + if !includesDescriptorProto { + wantsDescriptorProto = true + // make a defensive copy so we don't inadvertently mutate + // slice's backing array when adding this implicit dep + importsCopy := make([]string, len(imports)+1) + copy(importsCopy, imports) + importsCopy[len(imports)] = descriptorProtoPath + imports = importsCopy + } + } + } + + if len(imports) > 0 { + t.r.setBlockedOn(imports) + + results := make([]*result, len(fileDescriptorProto.Dependency)) + checked := map[string]struct{}{} + for i, dep := range fileDescriptorProto.Dependency { + pos := findImportPos(parseRes, dep) + if name == dep { + // doh! file imports itself + handleImportCycle(t.h, pos, []string{name}, dep) + return nil, t.h.Error() + } + + res := t.e.compile(ctx, dep) + // check for dependency cycle to prevent deadlock + if err := t.e.checkForDependencyCycle(res, []string{name, dep}, pos, checked); err != nil { + return nil, err + } + results[i] = res + } + capacity := len(results) + if wantsDescriptorProto { + capacity++ + } + deps = make([]linker.File, len(results), capacity) + var descriptorProtoRes *result + if wantsDescriptorProto { + descriptorProtoRes = t.e.compile(ctx, descriptorProtoPath) + } + + // release our semaphore so dependencies can be processed w/out risk of deadlock + t.e.s.Release(1) + t.released = true + + // now we wait for them all to be computed + for i, res := range results { + select { + case <-res.ready: + if res.err != nil { + if rerr, ok := res.err.(errFailedToResolve); ok { + // We don't report errors to get file from resolver to handler since + // it's usually considered immediately fatal. However, if the reason + // we were resolving is due to an import, turn this into an error with + // source position that pinpoints the import statement and report it. 
+ return nil, reporter.Error(findImportPos(parseRes, res.name), rerr) + } + return nil, res.err + } + deps[i] = res.res + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if descriptorProtoRes != nil { + select { + case <-descriptorProtoRes.ready: + // descriptor.proto wasn't explicitly imported, so we can ignore a failure + if descriptorProtoRes.err == nil { + deps = append(deps, descriptorProtoRes.res) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + // all deps resolved + t.r.setBlockedOn(nil) + // reacquire semaphore so we can proceed + if err := t.e.s.Acquire(ctx, 1); err != nil { + return nil, err + } + t.released = false + } + + return t.link(parseRes, deps) +} + +func (e *executor) checkForDependencyCycle(res *result, sequence []string, pos ast.SourcePos, checked map[string]struct{}) error { + if _, ok := checked[res.name]; ok { + // already checked this one + return nil + } + checked[res.name] = struct{}{} + deps := res.getBlockedOn() + for _, dep := range deps { + // is this a cycle? + for _, file := range sequence { + if file == dep { + handleImportCycle(e.h, pos, sequence, dep) + return e.h.Error() + } + } + + e.mu.Lock() + depRes := e.results[dep] + e.mu.Unlock() + if depRes == nil { + continue + } + if err := e.checkForDependencyCycle(depRes, append(sequence, dep), pos, checked); err != nil { + return err + } + } + return nil +} + +func handleImportCycle(h *reporter.Handler, pos ast.SourcePos, importSequence []string, dep string) { + var buf bytes.Buffer + buf.WriteString("cycle found in imports: ") + for _, imp := range importSequence { + fmt.Fprintf(&buf, "%q -> ", imp) + } + fmt.Fprintf(&buf, "%q", dep) + // error is saved and returned in caller + h.HandleErrorf(pos, buf.String()) //nolint:errcheck +} + +func findImportPos(res parser.Result, dep string) ast.SourcePos { + root := res.AST() + if root == nil { + return ast.UnknownPos(res.FileNode().Name()) + } + for _, decl := range root.Decls { + if imp, ok := decl.(*ast.ImportNode); ok { + if imp.Name.AsString() == dep { + return root.NodeInfo(imp.Name).Start() + } + } + } + // this should never happen... 
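+	// (each dependency name comes from the parsed file's own import
+	// statements, so when an AST exists a matching ImportNode is expected)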
+	return ast.UnknownPos(res.FileNode().Name())
+}
+
+func (t *task) link(parseRes parser.Result, deps linker.Files) (linker.File, error) {
+	file, err := linker.Link(parseRes, deps, t.e.sym, t.h)
+	if err != nil {
+		return nil, err
+	}
+	optsIndex, err := options.InterpretOptions(file, t.h)
+	if err != nil {
+		return nil, err
+	}
+	// now that options are interpreted, we can do some additional checks
+	if err := file.ValidateOptions(t.h); err != nil {
+		return nil, err
+	}
+	if t.r.explicitFile {
+		file.CheckForUnusedImports(t.h)
+	}
+
+	if needsSourceInfo(parseRes, t.e.c.SourceInfoMode) {
+		switch t.e.c.SourceInfoMode {
+		case SourceInfoStandard:
+			parseRes.FileDescriptorProto().SourceCodeInfo = sourceinfo.GenerateSourceInfo(parseRes.AST(), optsIndex)
+		case SourceInfoExtraComments:
+			parseRes.FileDescriptorProto().SourceCodeInfo = sourceinfo.GenerateSourceInfoWithExtraComments(parseRes.AST(), optsIndex)
+		}
+		file.PopulateSourceCodeInfo()
+	}
+
+	if !t.e.c.RetainASTs {
+		file.RemoveAST()
+	}
+	return file, nil
+}
+
+func needsSourceInfo(parseRes parser.Result, mode SourceInfoMode) bool {
+	return mode != SourceInfoNone && parseRes.AST() != nil && parseRes.FileDescriptorProto().SourceCodeInfo == nil
+}
+
+func (t *task) asParseResult(name string, r SearchResult) (parser.Result, error) {
+	if r.ParseResult != nil {
+		if r.ParseResult.FileDescriptorProto().GetName() != name {
+			return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.ParseResult.FileDescriptorProto().GetName())
+		}
+		// If the file descriptor needs linking, it will be mutated during the
+		// next stage. So to make any mutations thread-safe, we must make a
+		// defensive copy.
+		res := parser.Clone(r.ParseResult)
+		return res, nil
+	}
+
+	if r.Proto != nil {
+		if r.Proto.GetName() != name {
+			return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.Proto.GetName())
+		}
+		// If the file descriptor needs linking, it will be mutated during the
+		// next stage. So to make any mutations thread-safe, we must make a
+		// defensive copy.
+		descProto := proto.Clone(r.Proto).(*descriptorpb.FileDescriptorProto) //nolint:errcheck
+		return parser.ResultWithoutAST(descProto), nil
+	}
+
+	file, err := t.asAST(name, r)
+	if err != nil {
+		return nil, err
+	}
+
+	return parser.ResultFromAST(file, true, t.h)
+}
+
+func (t *task) asAST(name string, r SearchResult) (*ast.FileNode, error) {
+	if r.AST != nil {
+		if r.AST.Name() != name {
+			return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.AST.Name())
+		}
+		return r.AST, nil
+	}
+
+	return parser.Parse(name, r.Source, t.h)
+}
diff --git a/vendor/github.com/bufbuild/protocompile/doc.go b/vendor/github.com/bufbuild/protocompile/doc.go
new file mode 100644
index 00000000..661ccc45
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2020-2022 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// Package protocompile provides the entry point for a high performance +// native Go protobuf compiler. "Compile" in this case just means parsing +// and validating source and generating fully-linked descriptors in the end. +// Unlike the protoc command-line tool, this package does not try to use the +// descriptors to perform code generation. +// +// The various sub-packages represent the various compile phases and contain +// models for the intermediate results. Those phases follow: +// 1. Parse into AST. +// Also see: parser.Parse +// 2. Convert AST to unlinked descriptor protos. +// Also see: parser.ResultFromAST +// 3. Link descriptor protos into "rich" descriptors. +// Also see: linker.Link +// 4. Interpret custom options. +// Also see: options.InterpretOptions +// 5. Generate source code info. +// Also see: sourceinfo.GenerateSourceInfo +// +// This package provides an easy-to-use interface that does all the relevant +// phases, based on the inputs given. If an input is provided as source, all +// phases apply. If an input is provided as a descriptor proto, only phases +// 3 to 5 apply. Nothing is necessary if provided a linked descriptor (which +// is usually only the case for select system dependencies). +// +// This package is also capable of taking advantage of multiple CPU cores, so +// a compilation involving thousands of files can be done very quickly by +// compiling things in parallel. +// +// # Resolvers +// +// A Resolver is how the compiler locates artifacts that are inputs to the +// compilation. For example, it can load protobuf source code that must be +// processed. A Resolver could also supply some already-compiled dependencies +// as fully-linked descriptors, alleviating the need to re-compile them. +// +// A Resolver can provide any of the following in response to a query for an +// input. +// - Source code: If a resolver answers a query with protobuf source, the +// compiler will parse and compile it. +// - AST: If a resolver answers a query with an AST, the parsing step can be +// skipped, and the rest of the compilation steps will be applied. +// - Descriptor proto: If a resolver answers a query with an unlinked proto, +// only the other compilation steps, including linking, need to be applied. +// - Descriptor: If a resolver answers a query with a fully-linked descriptor, +// nothing further needs to be done. The descriptor is used as-is. +// +// Compilation will use the Resolver to load the files that are to be compiled +// and also to load all dependencies (i.e. other files imported by those being +// compiled). +// +// # Compiler +// +// A Compiler accepts a list of file names and produces the list of descriptors. +// A Compiler has several fields that control how it works but only the Resolver +// field is required. A minimal Compiler, that resolves files by loading them +// from the file system based on the current working directory, can be had with +// the following simple snippet: +// +// compiler := protocompile.Compiler{ +// Resolver: &protocompile.SourceResolver{}, +// } +// +// This minimal Compiler will use default parallelism, equal to the number of +// CPU cores detected; it will not generate source code info in the resulting +// descriptors; and it will fail fast at the first sign of any error. All of +// these aspects can be customized by setting other fields. 
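+//
+// As a further hedged sketch (the specific field values are illustrative
+// assumptions, not requirements), a customized Compiler might cap
+// parallelism and request standard source code info:
+//
+//	compiler := protocompile.Compiler{
+//		Resolver:       &protocompile.SourceResolver{},
+//		MaxParallelism: 4,
+//		SourceInfoMode: protocompile.SourceInfoStandard,
+//	}
+//	files, err := compiler.Compile(context.Background(), "foo.proto")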
+package protocompile diff --git a/vendor/github.com/bufbuild/protocompile/go.work b/vendor/github.com/bufbuild/protocompile/go.work new file mode 100644 index 00000000..de1c41a8 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/go.work @@ -0,0 +1,7 @@ +go 1.19 + +use ( + . + ./internal/benchmarks + ./internal/tools +) diff --git a/vendor/github.com/bufbuild/protocompile/go.work.sum b/vendor/github.com/bufbuild/protocompile/go.work.sum new file mode 100644 index 00000000..6411d166 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/go.work.sum @@ -0,0 +1,408 @@ +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod 
h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dlp 
v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= 
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= 
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= +github.com/jhump/protoreflect v1.14.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= 
+github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= +github.com/jhump/protoreflect v1.14.1/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api 
v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto 
v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/bufbuild/protocompile/internal/message_context.go b/vendor/github.com/bufbuild/protocompile/internal/message_context.go new file mode 100644 index 00000000..134a05d0 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/message_context.go @@ -0,0 +1,98 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "bytes" + "fmt" + + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" +) + +// ParsedFile wraps an optional AST and required FileDescriptorProto. +// This is used so types like parser.Result can be passed to this internal package avoiding circular imports. 
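+// (This internal package cannot import the parser package directly without creating such a cycle.)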
+// Additionally, it makes it less likely that users might specify one but not the other. +type ParsedFile interface { + // AST returns the parsed abstract syntax tree. This returns nil if the + // Result was created without an AST. + AST() *ast.FileNode + // FileDescriptorProto returns the file descriptor proto. + FileDescriptorProto() *descriptorpb.FileDescriptorProto +} + +// MessageContext provides information about the location in a descriptor +// hierarchy, for adding context to warnings and error messages. +type MessageContext struct { + // The relevant file + File ParsedFile + + // The type and fully-qualified name of the element within the file. + ElementType string + ElementName string + + // If the element being processed is an option (or *in* an option) + // on the named element above, this will be non-nil. + Option *descriptorpb.UninterpretedOption + // If the element being processed is inside a message literal in an + // option value, this will be non-empty and represent a traversal + // to the element in question. + OptAggPath string +} + +func (c *MessageContext) String() string { + var ctx bytes.Buffer + if c.ElementType != "file" { + _, _ = fmt.Fprintf(&ctx, "%s %s: ", c.ElementType, c.ElementName) + } + if c.Option != nil && c.Option.Name != nil { + ctx.WriteString("option ") + writeOptionName(&ctx, c.Option.Name) + if c.File.AST() == nil { + // if we have no source position info, try to provide as much context + // as possible (if the AST is present, we don't need this because any errors + // will actually have file and line numbers) + if c.OptAggPath != "" { + _, _ = fmt.Fprintf(&ctx, " at %s", c.OptAggPath) + } + } + ctx.WriteString(": ") + } + return ctx.String() +} + +func writeOptionName(buf *bytes.Buffer, parts []*descriptorpb.UninterpretedOption_NamePart) { + first := true + for _, p := range parts { + if first { + first = false + } else { + buf.WriteByte('.') + } + nm := p.GetNamePart() + if nm[0] == '.' { + // skip leading dot + nm = nm[1:] + } + if p.GetIsExtension() { + buf.WriteByte('(') + buf.WriteString(nm) + buf.WriteByte(')') + } else { + buf.WriteString(nm) + } + } +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/norace.go b/vendor/github.com/bufbuild/protocompile/internal/norace.go new file mode 100644 index 00000000..ada2f5cb --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/norace.go @@ -0,0 +1,19 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !race + +package internal + +const IsRace = false diff --git a/vendor/github.com/bufbuild/protocompile/internal/options.go b/vendor/github.com/bufbuild/protocompile/internal/options.go new file mode 100644 index 00000000..5586802b --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/options.go @@ -0,0 +1,59 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" +) + +type hasOptionNode interface { + OptionNode(part *descriptorpb.UninterpretedOption) ast.OptionDeclNode + FileNode() ast.FileDeclNode // needed in order to query for NodeInfo +} + +func FindOption(res hasOptionNode, handler *reporter.Handler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) { + found := -1 + for i, opt := range opts { + if len(opt.Name) != 1 { + continue + } + if opt.Name[0].GetIsExtension() || opt.Name[0].GetNamePart() != name { + continue + } + if found >= 0 { + optNode := res.OptionNode(opt) + fn := res.FileNode() + node := optNode.GetName() + nodeInfo := fn.NodeInfo(node) + return -1, handler.HandleErrorf(nodeInfo.Start(), "%s: option %s cannot be defined more than once", scope, name) + } + found = i + } + return found, nil +} + +func RemoveOption(uo []*descriptorpb.UninterpretedOption, indexToRemove int) []*descriptorpb.UninterpretedOption { + switch { + case indexToRemove == 0: + return uo[1:] + case indexToRemove == len(uo)-1: + return uo[:len(uo)-1] + default: + return append(uo[:indexToRemove], uo[indexToRemove+1:]...) + } +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/race.go b/vendor/github.com/bufbuild/protocompile/internal/race.go new file mode 100644 index 00000000..4a458a3f --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/race.go @@ -0,0 +1,19 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build race + +package internal + +const IsRace = true diff --git a/vendor/github.com/bufbuild/protocompile/internal/tags.go b/vendor/github.com/bufbuild/protocompile/internal/tags.go new file mode 100644 index 00000000..7e674c17 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/tags.go @@ -0,0 +1,243 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import "math" + +const ( + // MaxNormalTag is the maximum allowed tag number for a field in a normal message. + MaxNormalTag = 536870911 // 2^29 - 1 + + // MaxMessageSetTag is the maximum allowed tag number of a field in a message that + // uses the message set wire format. + MaxMessageSetTag = math.MaxInt32 - 1 + + // MaxTag is the maximum allowed tag number. (It is the same as MaxMessageSetTag + // since that is the absolute highest allowed.) + MaxTag = MaxMessageSetTag + + // SpecialReservedStart is the first tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedStart = 19000 + // SpecialReservedEnd is the last tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedEnd = 19999 + + // NB: It would be nice to use constants from generated code instead of + // hard-coding these here. But code-gen does not emit these as constants + // anywhere. The only places they appear in generated code are struct tags + // on fields of the generated descriptor protos. + + // FilePackageTag is the tag number of the package element in a file + // descriptor proto. + FilePackageTag = 2 + // FileDependencyTag is the tag number of the dependencies element in a + // file descriptor proto. + FileDependencyTag = 3 + // FileMessagesTag is the tag number of the messages element in a file + // descriptor proto. + FileMessagesTag = 4 + // FileEnumsTag is the tag number of the enums element in a file descriptor + // proto. + FileEnumsTag = 5 + // FileServicesTag is the tag number of the services element in a file + // descriptor proto. + FileServicesTag = 6 + // FileExtensionsTag is the tag number of the extensions element in a file + // descriptor proto. + FileExtensionsTag = 7 + // FileOptionsTag is the tag number of the options element in a file + // descriptor proto. + FileOptionsTag = 8 + // FileSourceCodeInfoTag is the tag number of the source code info element + // in a file descriptor proto. + FileSourceCodeInfoTag = 9 + // FilePublicDependencyTag is the tag number of the public dependency element + // in a file descriptor proto. + FilePublicDependencyTag = 10 + // FileWeakDependencyTag is the tag number of the weak dependency element + // in a file descriptor proto. + FileWeakDependencyTag = 11 + // FileSyntaxTag is the tag number of the syntax element in a file + // descriptor proto. + FileSyntaxTag = 12 + // MessageNameTag is the tag number of the name element in a message + // descriptor proto. + MessageNameTag = 1 + // MessageFieldsTag is the tag number of the fields element in a message + // descriptor proto. + MessageFieldsTag = 2 + // MessageNestedMessagesTag is the tag number of the nested messages + // element in a message descriptor proto. + MessageNestedMessagesTag = 3 + // MessageEnumsTag is the tag number of the enums element in a message + // descriptor proto. + MessageEnumsTag = 4 + // MessageExtensionRangesTag is the tag number of the extension ranges + // element in a message descriptor proto. + MessageExtensionRangesTag = 5 + // MessageExtensionsTag is the tag number of the extensions element in a + // message descriptor proto. + MessageExtensionsTag = 6 + // MessageOptionsTag is the tag number of the options element in a message + // descriptor proto. + MessageOptionsTag = 7 + // MessageOneOfsTag is the tag number of the one-ofs element in a message + // descriptor proto. 
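+ // For example, the source code info path [4, 0, 8, 1] uses these tags to + // address the second oneof of a file's first message (4 = FileMessagesTag, + // 8 = MessageOneOfsTag).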
+ MessageOneOfsTag = 8 + // MessageReservedRangesTag is the tag number of the reserved ranges element + // in a message descriptor proto. + MessageReservedRangesTag = 9 + // MessageReservedNamesTag is the tag number of the reserved names element + // in a message descriptor proto. + MessageReservedNamesTag = 10 + // ExtensionRangeStartTag is the tag number of the start index in an + // extension range proto. + ExtensionRangeStartTag = 1 + // ExtensionRangeEndTag is the tag number of the end index in an + // extension range proto. + ExtensionRangeEndTag = 2 + // ExtensionRangeOptionsTag is the tag number of the options element in an + // extension range proto. + ExtensionRangeOptionsTag = 3 + // ReservedRangeStartTag is the tag number of the start index in a reserved + // range proto. This field number is the same for both "flavors" of reserved + // ranges: DescriptorProto.ReservedRange and EnumDescriptorProto.EnumReservedRange. + ReservedRangeStartTag = 1 + // ReservedRangeEndTag is the tag number of the end index in a reserved + // range proto. This field number is the same for both "flavors" of reserved + // ranges: DescriptorProto.ReservedRange and EnumDescriptorProto.EnumReservedRange. + ReservedRangeEndTag = 2 + // FieldNameTag is the tag number of the name element in a field descriptor + // proto. + FieldNameTag = 1 + // FieldExtendeeTag is the tag number of the extendee element in a field + // descriptor proto. + FieldExtendeeTag = 2 + // FieldNumberTag is the tag number of the number element in a field + // descriptor proto. + FieldNumberTag = 3 + // FieldLabelTag is the tag number of the label element in a field + // descriptor proto. + FieldLabelTag = 4 + // FieldTypeTag is the tag number of the type element in a field descriptor + // proto. + FieldTypeTag = 5 + // FieldTypeNameTag is the tag number of the type name element in a field + // descriptor proto. + FieldTypeNameTag = 6 + // FieldDefaultTag is the tag number of the default value element in a + // field descriptor proto. + FieldDefaultTag = 7 + // FieldOptionsTag is the tag number of the options element in a field + // descriptor proto. + FieldOptionsTag = 8 + // FieldOneofIndexTag is the tag number of the oneof index element in a + // field descriptor proto. + FieldOneofIndexTag = 9 + // FieldJSONNameTag is the tag number of the JSON name element in a field + // descriptor proto. + FieldJSONNameTag = 10 + // FieldProto3OptionalTag is the tag number of the proto3_optional element + // in a descriptor proto. + FieldProto3OptionalTag = 17 + // OneOfNameTag is the tag number of the name element in a one-of + // descriptor proto. + OneOfNameTag = 1 + // OneOfOptionsTag is the tag number of the options element in a one-of + // descriptor proto. + OneOfOptionsTag = 2 + // EnumNameTag is the tag number of the name element in an enum descriptor + // proto. + EnumNameTag = 1 + // EnumValuesTag is the tag number of the values element in an enum + // descriptor proto. + EnumValuesTag = 2 + // EnumOptionsTag is the tag number of the options element in an enum + // descriptor proto. + EnumOptionsTag = 3 + // EnumReservedRangesTag is the tag number of the reserved ranges element in + // an enum descriptor proto. + EnumReservedRangesTag = 4 + // EnumReservedNamesTag is the tag number of the reserved names element in + // an enum descriptor proto. + EnumReservedNamesTag = 5 + // EnumValNameTag is the tag number of the name element in an enum value + // descriptor proto. 
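+ // For example, the path [5, 0, 2, 3, 1] addresses the name of the fourth + // value of a file's first enum (5 = FileEnumsTag, 2 = EnumValuesTag, + // 1 = EnumValNameTag).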
+ EnumValNameTag = 1 + // EnumValNumberTag is the tag number of the number element in an enum + // value descriptor proto. + EnumValNumberTag = 2 + // EnumValOptionsTag is the tag number of the options element in an enum + // value descriptor proto. + EnumValOptionsTag = 3 + // ServiceNameTag is the tag number of the name element in a service + // descriptor proto. + ServiceNameTag = 1 + // ServiceMethodsTag is the tag number of the methods element in a service + // descriptor proto. + ServiceMethodsTag = 2 + // ServiceOptionsTag is the tag number of the options element in a service + // descriptor proto. + ServiceOptionsTag = 3 + // MethodNameTag is the tag number of the name element in a method + // descriptor proto. + MethodNameTag = 1 + // MethodInputTag is the tag number of the input type element in a method + // descriptor proto. + MethodInputTag = 2 + // MethodOutputTag is the tag number of the output type element in a method + // descriptor proto. + MethodOutputTag = 3 + // MethodOptionsTag is the tag number of the options element in a method + // descriptor proto. + MethodOptionsTag = 4 + // MethodInputStreamTag is the tag number of the input stream flag in a + // method descriptor proto. + MethodInputStreamTag = 5 + // MethodOutputStreamTag is the tag number of the output stream flag in a + // method descriptor proto. + MethodOutputStreamTag = 6 + + // UninterpretedOptionsTag is the tag number of the uninterpreted options + // element. All *Options messages use the same tag for the field that stores + // uninterpreted options. + UninterpretedOptionsTag = 999 + + // UninterpretedNameTag is the tag number of the name element in an + // uninterpreted options proto. + UninterpretedNameTag = 2 + // UninterpretedIdentTag is the tag number of the identifier value in an + // uninterpreted options proto. + UninterpretedIdentTag = 3 + // UninterpretedPosIntTag is the tag number of the positive int value in an + // uninterpreted options proto. + UninterpretedPosIntTag = 4 + // UninterpretedNegIntTag is the tag number of the negative int value in an + // uninterpreted options proto. + UninterpretedNegIntTag = 5 + // UninterpretedDoubleTag is the tag number of the double value in an + // uninterpreted options proto. + UninterpretedDoubleTag = 6 + // UninterpretedStringTag is the tag number of the string value in an + // uninterpreted options proto. + UninterpretedStringTag = 7 + // UninterpretedAggregateTag is the tag number of the aggregate value in an + // uninterpreted options proto. + UninterpretedAggregateTag = 8 + // UninterpretedNameNameTag is the tag number of the name element in an + // uninterpreted option name proto. + UninterpretedNameNameTag = 1 +) diff --git a/vendor/github.com/bufbuild/protocompile/internal/types.go b/vendor/github.com/bufbuild/protocompile/internal/types.go new file mode 100644 index 00000000..5e54e4af --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/types.go @@ -0,0 +1,35 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "google.golang.org/protobuf/types/descriptorpb" + +var FieldTypes = map[string]descriptorpb.FieldDescriptorProto_Type{ + "double": descriptorpb.FieldDescriptorProto_TYPE_DOUBLE, + "float": descriptorpb.FieldDescriptorProto_TYPE_FLOAT, + "int32": descriptorpb.FieldDescriptorProto_TYPE_INT32, + "int64": descriptorpb.FieldDescriptorProto_TYPE_INT64, + "uint32": descriptorpb.FieldDescriptorProto_TYPE_UINT32, + "uint64": descriptorpb.FieldDescriptorProto_TYPE_UINT64, + "sint32": descriptorpb.FieldDescriptorProto_TYPE_SINT32, + "sint64": descriptorpb.FieldDescriptorProto_TYPE_SINT64, + "fixed32": descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + "fixed64": descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + "sfixed32": descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + "sfixed64": descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + "bool": descriptorpb.FieldDescriptorProto_TYPE_BOOL, + "string": descriptorpb.FieldDescriptorProto_TYPE_STRING, + "bytes": descriptorpb.FieldDescriptorProto_TYPE_BYTES, +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/util.go b/vendor/github.com/bufbuild/protocompile/internal/util.go new file mode 100644 index 00000000..f01513ea --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/util.go @@ -0,0 +1,119 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "bytes" + "unicode" + "unicode/utf8" +) + +// JSONName returns the default JSON name for a field with the given name. +// This mirrors the algorithm in protoc: +// +// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95 +func JSONName(name string) string { + var js []rune + nextUpper := false + for _, r := range name { + if r == '_' { + nextUpper = true + continue + } + if nextUpper { + nextUpper = false + js = append(js, unicode.ToUpper(r)) + } else { + js = append(js, r) + } + } + return string(js) +} + +// InitCap returns the given field name, but with the first letter capitalized. +func InitCap(name string) string { + r, sz := utf8.DecodeRuneInString(name) + return string(unicode.ToUpper(r)) + name[sz:] +} + +// CreatePrefixList returns a list of package prefixes to search when resolving +// a symbol name. If the given package is blank, it returns only the empty +// string. If the given package contains only one token, e.g. "foo", it returns +// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns +// successively shorter prefixes of the package and then the empty string. For +// example, for a package named "foo.bar.baz" it will return the following list: +// +// ["foo.bar.baz", "foo.bar", "foo", ""] +func CreatePrefixList(pkg string) []string { + if pkg == "" { + return []string{""} + } + + numDots := 0 + // one pass to pre-allocate the returned slice + for i := 0; i < len(pkg); i++ { + if pkg[i] == '.' 
{ + numDots++ + } + } + if numDots == 0 { + return []string{pkg, ""} + } + + prefixes := make([]string, numDots+2) + // second pass to fill in returned slice + for i := 0; i < len(pkg); i++ { + if pkg[i] == '.' { + prefixes[numDots] = pkg[:i] + numDots-- + } + } + prefixes[0] = pkg + + return prefixes +} + +func WriteEscapedBytes(buf *bytes.Buffer, b []byte) { + // This uses the same algorithm as the protoc C++ code for escaping strings. + // The protoc C++ code in turn uses the abseil C++ library's CEscape function: + // https://github.com/abseil/abseil-cpp/blob/934f613818ffcb26c942dff4a80be9a4031c662c/absl/strings/escaping.cc#L406 + for _, c := range b { + switch c { + case '\n': + buf.WriteString("\\n") + case '\r': + buf.WriteString("\\r") + case '\t': + buf.WriteString("\\t") + case '"': + buf.WriteString("\\\"") + case '\'': + buf.WriteString("\\'") + case '\\': + buf.WriteString("\\\\") + default: + if c >= 0x20 && c < 0x7f { + // simple printable characters + buf.WriteByte(c) + } else { + // use octal escape for all other values + buf.WriteRune('\\') + buf.WriteByte('0' + ((c >> 6) & 0x7)) + buf.WriteByte('0' + ((c >> 3) & 0x7)) + buf.WriteByte('0' + (c & 0x7)) + } + } + } +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/descriptors.go b/vendor/github.com/bufbuild/protocompile/linker/descriptors.go new file mode 100644 index 00000000..20968936 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/descriptors.go @@ -0,0 +1,1880 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/protoutil" +) + +// This file contains implementations of protoreflect.Descriptor. Note that +// this is a hack since those interfaces have a "doNotImplement" tag +// interface therein. We do just enough to make dynamicpb happy; constructing +// a regular descriptor would fail because we haven't yet interpreted options +// at the point we need these, and some validations will fail if the options +// aren't present. + +type result struct { + protoreflect.FileDescriptor + parser.Result + prefix string + deps Files + + // A map of all descriptors keyed by their fully-qualified name (without + // any leading dot). + descriptors map[string]protoreflect.Descriptor + + // A set of imports that have been used in the course of linking and + // interpreting options. + usedImports map[string]struct{} + + // A map of descriptor options messages to their pre-serialized bytes (using + // a canonical serialization format based on how protoc renders options to + // bytes). 
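+ // These bytes are re-attached as unknown fields on a clone of the file by + // CanonicalProto (via the storeOptionBytesIn* helpers below) so that + // re-serialization preserves the original option encoding.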
+ optionBytes map[proto.Message][]byte + + // A map of AST nodes that represent identifiers in ast.FieldReferenceNodes + // to their fully-qualified name. The identifiers are for field names in + // message literals (in option values) that are extension fields. These names + // are resolved during linking and stored here, to be used to interpret options. + optionQualifiedNames map[ast.IdentValueNode]string + + imports fileImports + messages msgDescriptors + enums enumDescriptors + extensions extDescriptors + services svcDescriptors + srcLocations srcLocs +} + +var _ protoreflect.FileDescriptor = (*result)(nil) +var _ Result = (*result)(nil) +var _ protoutil.DescriptorProtoWrapper = (*result)(nil) + +func (r *result) RemoveAST() { + r.Result = parser.ResultWithoutAST(r.FileDescriptorProto()) + r.optionQualifiedNames = nil +} + +func (r *result) AsProto() proto.Message { + return r.FileDescriptorProto() +} + +func (r *result) ParentFile() protoreflect.FileDescriptor { + return r +} + +func (r *result) Parent() protoreflect.Descriptor { + return nil +} + +func (r *result) Index() int { + return 0 +} + +func (r *result) Syntax() protoreflect.Syntax { + switch r.FileDescriptorProto().GetSyntax() { + case "proto2", "": + return protoreflect.Proto2 + case "proto3": + return protoreflect.Proto3 + default: + return 0 // ??? + } +} + +func (r *result) Name() protoreflect.Name { + return "" +} + +func (r *result) FullName() protoreflect.FullName { + return r.Package() +} + +func (r *result) IsPlaceholder() bool { + return false +} + +func (r *result) Options() protoreflect.ProtoMessage { + return r.FileDescriptorProto().Options +} + +func (r *result) Path() string { + return r.FileDescriptorProto().GetName() +} + +func (r *result) Package() protoreflect.FullName { + return protoreflect.FullName(r.FileDescriptorProto().GetPackage()) +} + +func (r *result) Imports() protoreflect.FileImports { + return &r.imports +} + +func (r *result) Enums() protoreflect.EnumDescriptors { + return &r.enums +} + +func (r *result) Messages() protoreflect.MessageDescriptors { + return &r.messages +} + +func (r *result) Extensions() protoreflect.ExtensionDescriptors { + return &r.extensions +} + +func (r *result) Services() protoreflect.ServiceDescriptors { + return &r.services +} + +func (r *result) PopulateSourceCodeInfo() { + srcLocProtos := asSourceLocations(r.FileDescriptorProto().GetSourceCodeInfo().GetLocation()) + srcLocIndex := computeSourceLocIndex(srcLocProtos) + r.srcLocations = srcLocs{file: r, locs: srcLocProtos, index: srcLocIndex} +} + +func (r *result) SourceLocations() protoreflect.SourceLocations { + return &r.srcLocations +} + +func computeSourceLocIndex(locs []protoreflect.SourceLocation) map[interface{}]int { + index := map[interface{}]int{} + for i, loc := range locs { + if loc.Next == 0 { + index[pathKey(loc.Path)] = i + } + } + return index +} + +func asSourceLocations(srcInfoProtos []*descriptorpb.SourceCodeInfo_Location) []protoreflect.SourceLocation { + locs := make([]protoreflect.SourceLocation, len(srcInfoProtos)) + prev := map[string]*protoreflect.SourceLocation{} + for i, loc := range srcInfoProtos { + var stLin, stCol, enLin, enCol int + if len(loc.Span) == 3 { + stLin, stCol, enCol = int(loc.Span[0]), int(loc.Span[1]), int(loc.Span[2]) + enLin = stLin + } else { + stLin, stCol, enLin, enCol = int(loc.Span[0]), int(loc.Span[1]), int(loc.Span[2]), int(loc.Span[3]) + } + locs[i] = protoreflect.SourceLocation{ + Path: loc.Path, + LeadingComments: loc.GetLeadingComments(), + 
LeadingDetachedComments: loc.GetLeadingDetachedComments(), + TrailingComments: loc.GetTrailingComments(), + StartLine: stLin, + StartColumn: stCol, + EndLine: enLin, + EndColumn: enCol, + } + str := pathStr(loc.Path) + pr := prev[str] + if pr != nil { + pr.Next = i + } + prev[str] = &locs[i] + } + return locs +} + +func pathStr(p protoreflect.SourcePath) string { + var buf bytes.Buffer + for _, v := range p { + fmt.Fprintf(&buf, "%x:", v) + } + return buf.String() +} + +// AddOptionBytes associates the given opts (an options message encoded in the +// binary format) with the given options protobuf message. The protobuf message +// should exist in the hierarchy of this result's FileDescriptorProto. This +// allows the FileDescriptorProto to be marshaled to bytes in a way that +// preserves the way options are defined in source (just as is done by protoc, +// but not possible when only using the generated Go types and standard +// marshaling APIs in the protobuf runtime). +func (r *result) AddOptionBytes(pm proto.Message, opts []byte) { + if r.optionBytes == nil { + r.optionBytes = map[proto.Message][]byte{} + } + r.optionBytes[pm] = append(r.optionBytes[pm], opts...) +} + +func (r *result) CanonicalProto() *descriptorpb.FileDescriptorProto { + origFd := r.FileDescriptorProto() + // make a copy that we can mutate + fd := proto.Clone(origFd).(*descriptorpb.FileDescriptorProto) //nolint:errcheck + + r.storeOptionBytesInFile(fd, origFd) + + return fd +} + +func (r *result) storeOptionBytesInFile(fd, origFd *descriptorpb.FileDescriptorProto) { + if fd.Options != nil { + fd.Options.Reset() + fd.Options.ProtoReflect().SetUnknown(r.optionBytes[origFd.Options]) + } + + for i, md := range fd.MessageType { + origMd := origFd.MessageType[i] + r.storeOptionBytesInMessage(md, origMd) + } + + for i, ed := range fd.EnumType { + origEd := origFd.EnumType[i] + r.storeOptionBytesInEnum(ed, origEd) + } + + for i, exd := range fd.Extension { + origExd := origFd.Extension[i] + r.storeOptionBytesInField(exd, origExd) + } + + for i, sd := range fd.Service { + origSd := origFd.Service[i] + if sd.Options != nil { + sd.Options.Reset() + sd.Options.ProtoReflect().SetUnknown(r.optionBytes[origSd.Options]) + } + + for j, mtd := range sd.Method { + origMtd := origSd.Method[j] + if mtd.Options != nil { + mtd.Options.Reset() + mtd.Options.ProtoReflect().SetUnknown(r.optionBytes[origMtd.Options]) + } + } + } +} + +func (r *result) storeOptionBytesInMessage(md, origMd *descriptorpb.DescriptorProto) { + if md.GetOptions().GetMapEntry() { + // Map entry messages are synthesized. They won't have any option bytes + // since they don't actually appear in the source and thus cannot have any + // option declarations in the source.
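+ // (Their map_entry option is injected programmatically when the entry + // message is synthesized, so there are no bytes to preserve.)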
+ return + } + + if md.Options != nil { + md.Options.Reset() + md.Options.ProtoReflect().SetUnknown(r.optionBytes[origMd.Options]) + } + + for i, fld := range md.Field { + origFld := origMd.Field[i] + r.storeOptionBytesInField(fld, origFld) + } + + for i, ood := range md.OneofDecl { + origOod := origMd.OneofDecl[i] + if ood.Options != nil { + ood.Options.Reset() + ood.Options.ProtoReflect().SetUnknown(r.optionBytes[origOod.Options]) + } + } + + for i, exr := range md.ExtensionRange { + origExr := origMd.ExtensionRange[i] + if exr.Options != nil { + exr.Options.Reset() + exr.Options.ProtoReflect().SetUnknown(r.optionBytes[origExr.Options]) + } + } + + for i, nmd := range md.NestedType { + origNmd := origMd.NestedType[i] + r.storeOptionBytesInMessage(nmd, origNmd) + } + + for i, ed := range md.EnumType { + origEd := origMd.EnumType[i] + r.storeOptionBytesInEnum(ed, origEd) + } + + for i, exd := range md.Extension { + origExd := origMd.Extension[i] + r.storeOptionBytesInField(exd, origExd) + } +} + +func (r *result) storeOptionBytesInEnum(ed, origEd *descriptorpb.EnumDescriptorProto) { + if ed.Options != nil { + ed.Options.Reset() + ed.Options.ProtoReflect().SetUnknown(r.optionBytes[origEd.Options]) + } + + for i, evd := range ed.Value { + origEvd := origEd.Value[i] + if evd.Options != nil { + evd.Options.Reset() + evd.Options.ProtoReflect().SetUnknown(r.optionBytes[origEvd.Options]) + } + } +} + +func (r *result) storeOptionBytesInField(fld, origFld *descriptorpb.FieldDescriptorProto) { + if fld.Options != nil { + fld.Options.Reset() + fld.Options.ProtoReflect().SetUnknown(r.optionBytes[origFld.Options]) + } +} + +type fileImports struct { + protoreflect.FileImports + files []protoreflect.FileImport +} + +func (r *result) createImports() fileImports { + fd := r.FileDescriptorProto() + imps := make([]protoreflect.FileImport, len(fd.Dependency)) + for i, dep := range fd.Dependency { + desc := r.deps.FindFileByPath(dep) + imps[i] = protoreflect.FileImport{FileDescriptor: desc} + } + for _, publicIndex := range fd.PublicDependency { + imps[int(publicIndex)].IsPublic = true + } + for _, weakIndex := range fd.WeakDependency { + imps[int(weakIndex)].IsWeak = true + } + return fileImports{files: imps} +} + +func (f *fileImports) Len() int { + return len(f.files) +} + +func (f *fileImports) Get(i int) protoreflect.FileImport { + return f.files[i] +} + +type srcLocs struct { + protoreflect.SourceLocations + file *result + locs []protoreflect.SourceLocation + index map[interface{}]int +} + +func (s *srcLocs) Len() int { + return len(s.locs) +} + +func (s *srcLocs) Get(i int) protoreflect.SourceLocation { + return s.locs[i] +} + +func (s *srcLocs) ByPath(p protoreflect.SourcePath) protoreflect.SourceLocation { + index, ok := s.index[pathKey(p)] + if !ok { + return protoreflect.SourceLocation{} + } + return s.locs[index] +} + +func (s *srcLocs) ByDescriptor(d protoreflect.Descriptor) protoreflect.SourceLocation { + if d.ParentFile() != s.file { + return protoreflect.SourceLocation{} + } + path, ok := computePath(d) + if !ok { + return protoreflect.SourceLocation{} + } + return s.ByPath(path) +} + +func computePath(d protoreflect.Descriptor) (protoreflect.SourcePath, bool) { + _, ok := d.(protoreflect.FileDescriptor) + if ok { + return nil, true + } + var path protoreflect.SourcePath + for { + p := d.Parent() + switch d := d.(type) { + case protoreflect.FileDescriptor: + return reverse(path), true + case protoreflect.MessageDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case 
protoreflect.FileDescriptor: + path = append(path, internal.FileMessagesTag) + case protoreflect.MessageDescriptor: + path = append(path, internal.MessageNestedMessagesTag) + default: + return nil, false + } + case protoreflect.FieldDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + if d.IsExtension() { + path = append(path, internal.FileExtensionsTag) + } else { + return nil, false + } + case protoreflect.MessageDescriptor: + if d.IsExtension() { + path = append(path, internal.MessageExtensionsTag) + } else { + path = append(path, internal.MessageFieldsTag) + } + default: + return nil, false + } + case protoreflect.OneofDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.MessageDescriptor); ok { + path = append(path, internal.MessageOneOfsTag) + } else { + return nil, false + } + case protoreflect.EnumDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + path = append(path, internal.FileEnumsTag) + case protoreflect.MessageDescriptor: + path = append(path, internal.MessageEnumsTag) + default: + return nil, false + } + case protoreflect.EnumValueDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.EnumDescriptor); ok { + path = append(path, internal.EnumValuesTag) + } else { + return nil, false + } + case protoreflect.ServiceDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.FileDescriptor); ok { + path = append(path, internal.FileServicesTag) + } else { + return nil, false + } + case protoreflect.MethodDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.ServiceDescriptor); ok { + path = append(path, internal.ServiceMethodsTag) + } else { + return nil, false + } + } + d = p + } +} + +func reverse(p protoreflect.SourcePath) protoreflect.SourcePath { + for i, j := 0, len(p)-1; i < j; i, j = i+1, j-1 { + p[i], p[j] = p[j], p[i] + } + return p +} + +type msgDescriptors struct { + protoreflect.MessageDescriptors + msgs []*msgDescriptor +} + +func (r *result) createMessages(prefix string, parent protoreflect.Descriptor, msgProtos []*descriptorpb.DescriptorProto) msgDescriptors { + msgs := make([]*msgDescriptor, len(msgProtos)) + for i, msgProto := range msgProtos { + msgs[i] = r.createMessageDescriptor(msgProto, parent, i, prefix+msgProto.GetName()) + } + return msgDescriptors{msgs: msgs} +} + +func (m *msgDescriptors) Len() int { + return len(m.msgs) +} + +func (m *msgDescriptors) Get(i int) protoreflect.MessageDescriptor { + return m.msgs[i] +} + +func (m *msgDescriptors) ByName(s protoreflect.Name) protoreflect.MessageDescriptor { + for _, msg := range m.msgs { + if msg.Name() == s { + return msg + } + } + return nil +} + +type msgDescriptor struct { + protoreflect.MessageDescriptor + file *result + parent protoreflect.Descriptor + index int + proto *descriptorpb.DescriptorProto + fqn string + + fields fldDescriptors + oneofs oneofDescriptors + nestedMessages msgDescriptors + nestedEnums enumDescriptors + nestedExtensions extDescriptors + + extRanges fieldRanges + rsvdRanges fieldRanges + rsvdNames names +} + +var _ protoreflect.MessageDescriptor = (*msgDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*msgDescriptor)(nil) + +func (r *result) createMessageDescriptor(md *descriptorpb.DescriptorProto, parent protoreflect.Descriptor, index int, fqn string) *msgDescriptor { + ret := &msgDescriptor{file: r, parent: parent, index: index, proto: md, fqn: fqn} 
+ r.descriptors[fqn] = ret + + prefix := fqn + "." + // NB: We MUST create fields before oneofs so that we can populate the + // set of fields that belong to the oneof + ret.fields = r.createFields(prefix, ret, md.Field) + ret.oneofs = r.createOneOfs(prefix, ret, md.OneofDecl) + ret.nestedMessages = r.createMessages(prefix, ret, md.NestedType) + ret.nestedEnums = r.createEnums(prefix, ret, md.EnumType) + ret.nestedExtensions = r.createExtensions(prefix, ret, md.Extension) + ret.extRanges = createFieldRanges(md.ExtensionRange) + ret.rsvdRanges = createFieldRanges(md.ReservedRange) + ret.rsvdNames = names{s: md.ReservedName} + + return ret +} + +func (m *msgDescriptor) MessageDescriptorProto() *descriptorpb.DescriptorProto { + return m.proto +} + +func (m *msgDescriptor) AsProto() proto.Message { + return m.proto +} + +func (m *msgDescriptor) ParentFile() protoreflect.FileDescriptor { + return m.file +} + +func (m *msgDescriptor) Parent() protoreflect.Descriptor { + return m.parent +} + +func (m *msgDescriptor) Index() int { + return m.index +} + +func (m *msgDescriptor) Syntax() protoreflect.Syntax { + return m.file.Syntax() +} + +func (m *msgDescriptor) Name() protoreflect.Name { + return protoreflect.Name(m.proto.GetName()) +} + +func (m *msgDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(m.fqn) +} + +func (m *msgDescriptor) IsPlaceholder() bool { + return false +} + +func (m *msgDescriptor) Options() protoreflect.ProtoMessage { + return m.proto.Options +} + +func (m *msgDescriptor) IsMapEntry() bool { + return m.proto.Options.GetMapEntry() +} + +func (m *msgDescriptor) Fields() protoreflect.FieldDescriptors { + return &m.fields +} + +func (m *msgDescriptor) Oneofs() protoreflect.OneofDescriptors { + return &m.oneofs +} + +func (m *msgDescriptor) ReservedNames() protoreflect.Names { + return m.rsvdNames +} + +func (m *msgDescriptor) ReservedRanges() protoreflect.FieldRanges { + return m.rsvdRanges +} + +func (m *msgDescriptor) RequiredNumbers() protoreflect.FieldNumbers { + var indexes fieldNums + for _, fld := range m.proto.Field { + if fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + indexes.s = append(indexes.s, fld.GetNumber()) + } + } + return indexes +} + +func (m *msgDescriptor) ExtensionRanges() protoreflect.FieldRanges { + return m.extRanges +} + +func (m *msgDescriptor) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { + return m.proto.ExtensionRange[i].Options +} + +func (m *msgDescriptor) Enums() protoreflect.EnumDescriptors { + return &m.nestedEnums +} + +func (m *msgDescriptor) Messages() protoreflect.MessageDescriptors { + return &m.nestedMessages +} + +func (m *msgDescriptor) Extensions() protoreflect.ExtensionDescriptors { + return &m.nestedExtensions +} + +type names struct { + protoreflect.Names + s []string +} + +func (n names) Len() int { + return len(n.s) +} + +func (n names) Get(i int) protoreflect.Name { + return protoreflect.Name(n.s[i]) +} + +func (n names) Has(s protoreflect.Name) bool { + for _, name := range n.s { + if name == string(s) { + return true + } + } + return false +} + +type fieldNums struct { + protoreflect.FieldNumbers + s []int32 +} + +func (n fieldNums) Len() int { + return len(n.s) +} + +func (n fieldNums) Get(i int) protoreflect.FieldNumber { + return protoreflect.FieldNumber(n.s[i]) +} + +func (n fieldNums) Has(s protoreflect.FieldNumber) bool { + for _, num := range n.s { + if num == int32(s) { + return true + } + } + return false +} + +type fieldRanges struct { + 
protoreflect.FieldRanges + ranges [][2]protoreflect.FieldNumber +} + +type fieldRange interface { + GetStart() int32 + GetEnd() int32 +} + +func createFieldRanges[T fieldRange](rangeProtos []T) fieldRanges { + ranges := make([][2]protoreflect.FieldNumber, len(rangeProtos)) + for i, r := range rangeProtos { + ranges[i] = [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(r.GetStart()), + protoreflect.FieldNumber(r.GetEnd()), + } + } + return fieldRanges{ranges: ranges} +} + +func (f fieldRanges) Len() int { + return len(f.ranges) +} + +func (f fieldRanges) Get(i int) [2]protoreflect.FieldNumber { + return f.ranges[i] +} + +func (f fieldRanges) Has(n protoreflect.FieldNumber) bool { + for _, r := range f.ranges { + if r[0] <= n && r[1] > n { + return true + } + } + return false +} + +type enumDescriptors struct { + protoreflect.EnumDescriptors + enums []*enumDescriptor +} + +func (r *result) createEnums(prefix string, parent protoreflect.Descriptor, enumProtos []*descriptorpb.EnumDescriptorProto) enumDescriptors { + enums := make([]*enumDescriptor, len(enumProtos)) + for i, enumProto := range enumProtos { + enums[i] = r.createEnumDescriptor(enumProto, parent, i, prefix+enumProto.GetName()) + } + return enumDescriptors{enums: enums} +} + +func (e *enumDescriptors) Len() int { + return len(e.enums) +} + +func (e *enumDescriptors) Get(i int) protoreflect.EnumDescriptor { + return e.enums[i] +} + +func (e *enumDescriptors) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + for _, en := range e.enums { + if en.Name() == s { + return en + } + } + return nil +} + +type enumDescriptor struct { + protoreflect.EnumDescriptor + file *result + parent protoreflect.Descriptor + index int + proto *descriptorpb.EnumDescriptorProto + fqn string + + values enValDescriptors + + rsvdRanges enumRanges + rsvdNames names +} + +var _ protoreflect.EnumDescriptor = (*enumDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*enumDescriptor)(nil) + +func (r *result) createEnumDescriptor(ed *descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, index int, fqn string) *enumDescriptor { + ret := &enumDescriptor{file: r, parent: parent, index: index, proto: ed, fqn: fqn} + r.descriptors[fqn] = ret + + // Unlike all other elements, the fully-qualified name of enum values + // is NOT scoped to their parent element (the enum), but rather to + // the enum's parent element. This follows C++ scoping rules for + // enum values. 
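+	// For example, value BAR of enum pkg.Foo has the fully-qualified name
+	// pkg.BAR, not pkg.Foo.BAR, which is why the prefix computed below drops
+	// the enum's own name.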
+ prefix := strings.TrimSuffix(fqn, ed.GetName()) + ret.values = r.createEnumValues(prefix, ret, ed.Value) + ret.rsvdRanges = createEnumRanges(ed.ReservedRange) + ret.rsvdNames = names{s: ed.ReservedName} + return ret +} + +func (e *enumDescriptor) EnumDescriptorProto() *descriptorpb.EnumDescriptorProto { + return e.proto +} + +func (e *enumDescriptor) AsProto() proto.Message { + return e.proto +} + +func (e *enumDescriptor) ParentFile() protoreflect.FileDescriptor { + return e.file +} + +func (e *enumDescriptor) Parent() protoreflect.Descriptor { + return e.parent +} + +func (e *enumDescriptor) Index() int { + return e.index +} + +func (e *enumDescriptor) Syntax() protoreflect.Syntax { + return e.file.Syntax() +} + +func (e *enumDescriptor) Name() protoreflect.Name { + return protoreflect.Name(e.proto.GetName()) +} + +func (e *enumDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(e.fqn) +} + +func (e *enumDescriptor) IsPlaceholder() bool { + return false +} + +func (e *enumDescriptor) Options() protoreflect.ProtoMessage { + return e.proto.Options +} + +func (e *enumDescriptor) Values() protoreflect.EnumValueDescriptors { + return &e.values +} + +func (e *enumDescriptor) ReservedNames() protoreflect.Names { + return e.rsvdNames +} + +func (e *enumDescriptor) ReservedRanges() protoreflect.EnumRanges { + return e.rsvdRanges +} + +type enumRanges struct { + protoreflect.EnumRanges + ranges [][2]protoreflect.EnumNumber +} + +func createEnumRanges(rangeProtos []*descriptorpb.EnumDescriptorProto_EnumReservedRange) enumRanges { + ranges := make([][2]protoreflect.EnumNumber, len(rangeProtos)) + for i, r := range rangeProtos { + ranges[i] = [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(r.GetStart()), + protoreflect.EnumNumber(r.GetEnd()), + } + } + return enumRanges{ranges: ranges} +} + +func (e enumRanges) Len() int { + return len(e.ranges) +} + +func (e enumRanges) Get(i int) [2]protoreflect.EnumNumber { + return e.ranges[i] +} + +func (e enumRanges) Has(n protoreflect.EnumNumber) bool { + for _, r := range e.ranges { + if r[0] <= n && r[1] >= n { + return true + } + } + return false +} + +type enValDescriptors struct { + protoreflect.EnumValueDescriptors + vals []*enValDescriptor +} + +func (r *result) createEnumValues(prefix string, parent *enumDescriptor, enValProtos []*descriptorpb.EnumValueDescriptorProto) enValDescriptors { + vals := make([]*enValDescriptor, len(enValProtos)) + for i, enValProto := range enValProtos { + vals[i] = r.createEnumValueDescriptor(enValProto, parent, i, prefix+enValProto.GetName()) + } + return enValDescriptors{vals: vals} +} + +func (e *enValDescriptors) Len() int { + return len(e.vals) +} + +func (e *enValDescriptors) Get(i int) protoreflect.EnumValueDescriptor { + return e.vals[i] +} + +func (e *enValDescriptors) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + for _, val := range e.vals { + if val.Name() == s { + return val + } + } + return nil +} + +func (e *enValDescriptors) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + for _, val := range e.vals { + if val.Number() == n { + return val + } + } + return nil +} + +type enValDescriptor struct { + protoreflect.EnumValueDescriptor + file *result + parent *enumDescriptor + index int + proto *descriptorpb.EnumValueDescriptorProto + fqn string +} + +var _ protoreflect.EnumValueDescriptor = (*enValDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*enValDescriptor)(nil) + +func (r *result) createEnumValueDescriptor(ed 
*descriptorpb.EnumValueDescriptorProto, parent *enumDescriptor, index int, fqn string) *enValDescriptor { + ret := &enValDescriptor{file: r, parent: parent, index: index, proto: ed, fqn: fqn} + r.descriptors[fqn] = ret + return ret +} + +func (e *enValDescriptor) EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto { + return e.proto +} + +func (e *enValDescriptor) AsProto() proto.Message { + return e.proto +} + +func (e *enValDescriptor) ParentFile() protoreflect.FileDescriptor { + return e.file +} + +func (e *enValDescriptor) Parent() protoreflect.Descriptor { + return e.parent +} + +func (e *enValDescriptor) Index() int { + return e.index +} + +func (e *enValDescriptor) Syntax() protoreflect.Syntax { + return e.file.Syntax() +} + +func (e *enValDescriptor) Name() protoreflect.Name { + return protoreflect.Name(e.proto.GetName()) +} + +func (e *enValDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(e.fqn) +} + +func (e *enValDescriptor) IsPlaceholder() bool { + return false +} + +func (e *enValDescriptor) Options() protoreflect.ProtoMessage { + return e.proto.Options +} + +func (e *enValDescriptor) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(e.proto.GetNumber()) +} + +type extDescriptors struct { + protoreflect.ExtensionDescriptors + exts []*extTypeDescriptor +} + +func (r *result) createExtensions(prefix string, parent protoreflect.Descriptor, extProtos []*descriptorpb.FieldDescriptorProto) extDescriptors { + exts := make([]*extTypeDescriptor, len(extProtos)) + for i, extProto := range extProtos { + exts[i] = r.createExtTypeDescriptor(extProto, parent, i, prefix+extProto.GetName()) + } + return extDescriptors{exts: exts} +} + +func (e *extDescriptors) Len() int { + return len(e.exts) +} + +func (e *extDescriptors) Get(i int) protoreflect.ExtensionDescriptor { + return e.exts[i] +} + +func (e *extDescriptors) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor { + for _, ext := range e.exts { + if ext.Name() == s { + return ext + } + } + return nil +} + +type extTypeDescriptor struct { + protoreflect.ExtensionTypeDescriptor + field *fldDescriptor +} + +var _ protoutil.DescriptorProtoWrapper = &extTypeDescriptor{} + +func (r *result) createExtTypeDescriptor(fd *descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, index int, fqn string) *extTypeDescriptor { + ret := &fldDescriptor{file: r, parent: parent, index: index, proto: fd, fqn: fqn} + r.descriptors[fqn] = ret + return &extTypeDescriptor{ExtensionTypeDescriptor: dynamicpb.NewExtensionType(ret).TypeDescriptor(), field: ret} +} + +func (e *extTypeDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto { + return e.field.proto +} + +func (e *extTypeDescriptor) AsProto() proto.Message { + return e.field.proto +} + +type fldDescriptors struct { + protoreflect.FieldDescriptors + fields []*fldDescriptor +} + +func (r *result) createFields(prefix string, parent *msgDescriptor, fldProtos []*descriptorpb.FieldDescriptorProto) fldDescriptors { + fields := make([]*fldDescriptor, len(fldProtos)) + for i, fldProto := range fldProtos { + fields[i] = r.createFieldDescriptor(fldProto, parent, i, prefix+fldProto.GetName()) + } + return fldDescriptors{fields: fields} +} + +func (f *fldDescriptors) Len() int { + return len(f.fields) +} + +func (f *fldDescriptors) Get(i int) protoreflect.FieldDescriptor { + return f.fields[i] +} + +func (f *fldDescriptors) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + for _, fld := range f.fields { + if 
fld.Name() == s { + return fld + } + } + return nil +} + +func (f *fldDescriptors) ByJSONName(s string) protoreflect.FieldDescriptor { + for _, fld := range f.fields { + if fld.JSONName() == s { + return fld + } + } + return nil +} + +func (f *fldDescriptors) ByTextName(s string) protoreflect.FieldDescriptor { + return f.ByName(protoreflect.Name(s)) +} + +func (f *fldDescriptors) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + for _, fld := range f.fields { + if fld.Number() == n { + return fld + } + } + return nil +} + +type fldDescriptor struct { + protoreflect.FieldDescriptor + file *result + parent protoreflect.Descriptor + index int + proto *descriptorpb.FieldDescriptorProto + fqn string + + msgType protoreflect.MessageDescriptor + extendee protoreflect.MessageDescriptor + enumType protoreflect.EnumDescriptor + oneof protoreflect.OneofDescriptor +} + +var _ protoreflect.FieldDescriptor = (*fldDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*fldDescriptor)(nil) + +func (r *result) createFieldDescriptor(fd *descriptorpb.FieldDescriptorProto, parent *msgDescriptor, index int, fqn string) *fldDescriptor { + ret := &fldDescriptor{file: r, parent: parent, index: index, proto: fd, fqn: fqn} + r.descriptors[fqn] = ret + return ret +} + +func (f *fldDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto { + return f.proto +} + +func (f *fldDescriptor) AsProto() proto.Message { + return f.proto +} + +func (f *fldDescriptor) ParentFile() protoreflect.FileDescriptor { + return f.file +} + +func (f *fldDescriptor) Parent() protoreflect.Descriptor { + return f.parent +} + +func (f *fldDescriptor) Index() int { + return f.index +} + +func (f *fldDescriptor) Syntax() protoreflect.Syntax { + return f.file.Syntax() +} + +func (f *fldDescriptor) Name() protoreflect.Name { + return protoreflect.Name(f.proto.GetName()) +} + +func (f *fldDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(f.fqn) +} + +func (f *fldDescriptor) IsPlaceholder() bool { + return false +} + +func (f *fldDescriptor) Options() protoreflect.ProtoMessage { + return f.proto.Options +} + +func (f *fldDescriptor) Number() protoreflect.FieldNumber { + return protoreflect.FieldNumber(f.proto.GetNumber()) +} + +func (f *fldDescriptor) Cardinality() protoreflect.Cardinality { + switch f.proto.GetLabel() { + case descriptorpb.FieldDescriptorProto_LABEL_REPEATED: + return protoreflect.Repeated + case descriptorpb.FieldDescriptorProto_LABEL_REQUIRED: + return protoreflect.Required + case descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL: + return protoreflect.Optional + default: + return 0 + } +} + +func (f *fldDescriptor) Kind() protoreflect.Kind { + return protoreflect.Kind(f.proto.GetType()) +} + +func (f *fldDescriptor) HasJSONName() bool { + return f.proto.JsonName != nil +} + +func (f *fldDescriptor) JSONName() string { + if f.IsExtension() { + return f.TextName() + } + return f.proto.GetJsonName() +} + +func (f *fldDescriptor) TextName() string { + if f.IsExtension() { + return fmt.Sprintf("[%s]", f.FullName()) + } + return string(f.Name()) +} + +func (f *fldDescriptor) HasPresence() bool { + if f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + return f.IsExtension() || + f.Syntax() == protoreflect.Proto2 || + f.Kind() == protoreflect.MessageKind || f.Kind() == protoreflect.GroupKind || + f.proto.OneofIndex != nil +} + +func (f *fldDescriptor) IsExtension() bool { + return f.proto.GetExtendee() != "" +} + +func (f 
*fldDescriptor) HasOptionalKeyword() bool { + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { + return false + } + if f.proto.GetProto3Optional() { + // NB: This smells weird to return false here. If the proto3_optional field + // is set, it's because the keyword WAS present. However, the Go runtime + // returns false for this case, so we mirror that behavior. + return !f.IsExtension() + } + // If it's optional, but not a proto3 optional, then the keyword is only + // present for proto2 files, for fields that are not part of a oneof. + return f.file.Syntax() == protoreflect.Proto2 && f.proto.OneofIndex == nil +} + +func (f *fldDescriptor) IsWeak() bool { + return f.proto.Options.GetWeak() +} + +func (f *fldDescriptor) IsPacked() bool { + opts := f.proto.GetOptions() + if opts.GetPacked() { + return true + } + if opts != nil && opts.Packed != nil { + // explicitly not packed + return false + } + + // proto3 defaults to packed for repeated scalar numeric fields + if f.file.Syntax() != protoreflect.Proto3 { + return false + } + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + switch f.proto.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_GROUP, descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_BYTES, descriptorpb.FieldDescriptorProto_TYPE_STRING: + return false + default: + // all others can be packed + return true + } +} + +func (f *fldDescriptor) IsList() bool { + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + return !f.isMapEntry() +} + +func (f *fldDescriptor) IsMap() bool { + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + if f.IsExtension() { + return false + } + return f.isMapEntry() +} + +func (f *fldDescriptor) isMapEntry() bool { + if f.proto.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE { + return false + } + return f.Message().IsMapEntry() +} + +func (f *fldDescriptor) MapKey() protoreflect.FieldDescriptor { + if !f.IsMap() { + return nil + } + return f.Message().Fields().ByNumber(1) +} + +func (f *fldDescriptor) MapValue() protoreflect.FieldDescriptor { + if !f.IsMap() { + return nil + } + return f.Message().Fields().ByNumber(2) +} + +func (f *fldDescriptor) HasDefault() bool { + return f.proto.DefaultValue != nil +} + +func (f *fldDescriptor) Default() protoreflect.Value { + // We only return a valid value for scalar fields + if f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED || + f.Kind() == protoreflect.GroupKind || f.Kind() == protoreflect.MessageKind { + return protoreflect.Value{} + } + + if f.proto.DefaultValue != nil { + defVal := f.parseDefaultValue(f.proto.GetDefaultValue()) + if defVal.IsValid() { + return defVal + } + // if we cannot parse a valid value, fall back to zero value below + } + + // No custom default value, so return the zero value for the type + switch f.Kind() { + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + 
return protoreflect.ValueOfFloat64(0) + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.EnumKind: + return protoreflect.ValueOfEnum(f.Enum().Values().Get(0).Number()) + case protoreflect.GroupKind, protoreflect.MessageKind: + return protoreflect.ValueOfMessage(dynamicpb.NewMessage(f.Message())) + default: + panic(fmt.Sprintf("unknown kind: %v", f.Kind())) + } +} + +func (f *fldDescriptor) parseDefaultValue(val string) protoreflect.Value { + switch f.Kind() { + case protoreflect.EnumKind: + vd := f.Enum().Values().ByName(protoreflect.Name(val)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()) + } + return protoreflect.Value{} + case protoreflect.BoolKind: + switch val { + case "true": + return protoreflect.ValueOfBool(true) + case "false": + return protoreflect.ValueOfBool(false) + default: + return protoreflect.Value{} + } + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes([]byte(unescape(val))) + case protoreflect.StringKind: + return protoreflect.ValueOfString(val) + case protoreflect.FloatKind: + if f, err := strconv.ParseFloat(val, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(f)) + } + return protoreflect.Value{} + case protoreflect.DoubleKind: + if f, err := strconv.ParseFloat(val, 64); err == nil { + return protoreflect.ValueOfFloat64(f) + } + return protoreflect.Value{} + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if i, err := strconv.ParseInt(val, 10, 32); err == nil { + return protoreflect.ValueOfInt32(int32(i)) + } + return protoreflect.Value{} + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if i, err := strconv.ParseUint(val, 10, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(i)) + } + return protoreflect.Value{} + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if i, err := strconv.ParseInt(val, 10, 64); err == nil { + return protoreflect.ValueOfInt64(i) + } + return protoreflect.Value{} + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if i, err := strconv.ParseUint(val, 10, 64); err == nil { + return protoreflect.ValueOfUint64(i) + } + return protoreflect.Value{} + default: + return protoreflect.Value{} + } +} + +func unescape(s string) string { + // protoc encodes default values for 'bytes' fields using C escaping, + // so this function reverses that escaping + out := make([]byte, 0, len(s)) + var buf [4]byte + for len(s) > 0 { + if s[0] != '\\' || len(s) < 2 { + // not escape sequence, or too short to be well-formed escape + out = append(out, s[0]) + s = s[1:] + continue + } + nextIndex := 2 // by default, skip '\' + escaped character + switch s[1] { + case 'x', 'X': + n := matchPrefix(s[2:], 2, isHex) + if n == 0 { + // bad escape + out = append(out, s[:2]...) + } else { + c, err := strconv.ParseUint(s[2:2+n], 16, 8) + if err != nil { + // shouldn't really happen... + out = append(out, s[:2+n]...) + } else { + out = append(out, byte(c)) + } + nextIndex = 2 + n + } + case '0', '1', '2', '3', '4', '5', '6', '7': + n := 1 + matchPrefix(s[2:], 2, isOctal) + c, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil || c > 0xff { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(c)) + } + nextIndex = 1 + n + case 'u': + if len(s) < 6 { + // bad escape + out = append(out, s...) 
+ nextIndex = len(s) + } else { + c, err := strconv.ParseUint(s[2:6], 16, 16) + if err != nil { + // bad escape + out = append(out, s[:6]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) + } + nextIndex = 6 + } + case 'U': + if len(s) < 10 { + // bad escape + out = append(out, s...) + nextIndex = len(s) + } else { + c, err := strconv.ParseUint(s[2:10], 16, 32) + if err != nil || c > 0x10ffff { + // bad escape + out = append(out, s[:10]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) + } + nextIndex = 10 + } + case 'a': + out = append(out, '\a') + case 'b': + out = append(out, '\b') + case 'f': + out = append(out, '\f') + case 'n': + out = append(out, '\n') + case 'r': + out = append(out, '\r') + case 't': + out = append(out, '\t') + case 'v': + out = append(out, '\v') + case '\\', '\'', '"', '?': + out = append(out, s[1]) + default: + // invalid escape, just copy it as-is + out = append(out, s[:2]...) + } + s = s[nextIndex:] + } + return string(out) +} + +func isOctal(b byte) bool { return b >= '0' && b <= '7' } +func isHex(b byte) bool { + return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F') +} +func matchPrefix(s string, limit int, fn func(byte) bool) int { + l := len(s) + if l > limit { + l = limit + } + i := 0 + for ; i < l; i++ { + if !fn(s[i]) { + return i + } + } + return i +} + +func (f *fldDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := f.Enum() + if ed == nil { + return nil + } + if f.proto.DefaultValue != nil { + if val := ed.Values().ByName(protoreflect.Name(f.proto.GetDefaultValue())); val != nil { + return val + } + } + // if no default specified in source, return nil + return nil +} + +func (f *fldDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + return f.oneof +} + +func (f *fldDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + if f.extendee != nil { + return f.extendee + } + return f.parent.(protoreflect.MessageDescriptor) +} + +func (f *fldDescriptor) Enum() protoreflect.EnumDescriptor { + return f.enumType +} + +func (f *fldDescriptor) Message() protoreflect.MessageDescriptor { + return f.msgType +} + +type oneofDescriptors struct { + protoreflect.OneofDescriptors + oneofs []*oneofDescriptor +} + +func (r *result) createOneOfs(prefix string, parent *msgDescriptor, ooProtos []*descriptorpb.OneofDescriptorProto) oneofDescriptors { + oos := make([]*oneofDescriptor, len(ooProtos)) + for i, fldProto := range ooProtos { + oos[i] = r.createOneOfDescriptor(fldProto, parent, i, prefix+fldProto.GetName()) + } + return oneofDescriptors{oneofs: oos} +} + +func (o *oneofDescriptors) Len() int { + return len(o.oneofs) +} + +func (o *oneofDescriptors) Get(i int) protoreflect.OneofDescriptor { + return o.oneofs[i] +} + +func (o *oneofDescriptors) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + for _, oo := range o.oneofs { + if oo.Name() == s { + return oo + } + } + return nil +} + +type oneofDescriptor struct { + protoreflect.OneofDescriptor + file *result + parent *msgDescriptor + index int + proto *descriptorpb.OneofDescriptorProto + fqn string + + fields fldDescriptors +} + +var _ protoreflect.OneofDescriptor = (*oneofDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*oneofDescriptor)(nil) + +func (r *result) createOneOfDescriptor(ood *descriptorpb.OneofDescriptorProto, parent *msgDescriptor, index int, fqn string) *oneofDescriptor { + ret := &oneofDescriptor{file: r, parent: parent, index: index, 
proto: ood, fqn: fqn} + r.descriptors[fqn] = ret + + var fields []*fldDescriptor + for _, fld := range parent.fields.fields { + if fld.proto.OneofIndex != nil && int(fld.proto.GetOneofIndex()) == index { + fields = append(fields, fld) + } + } + ret.fields = fldDescriptors{fields: fields} + + return ret +} + +func (o *oneofDescriptor) OneOfDescriptorProto() *descriptorpb.OneofDescriptorProto { + return o.proto +} + +func (o *oneofDescriptor) AsProto() proto.Message { + return o.proto +} + +func (o *oneofDescriptor) ParentFile() protoreflect.FileDescriptor { + return o.file +} + +func (o *oneofDescriptor) Parent() protoreflect.Descriptor { + return o.parent +} + +func (o *oneofDescriptor) Index() int { + return o.index +} + +func (o *oneofDescriptor) Syntax() protoreflect.Syntax { + return o.file.Syntax() +} + +func (o *oneofDescriptor) Name() protoreflect.Name { + return protoreflect.Name(o.proto.GetName()) +} + +func (o *oneofDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(o.fqn) +} + +func (o *oneofDescriptor) IsPlaceholder() bool { + return false +} + +func (o *oneofDescriptor) Options() protoreflect.ProtoMessage { + return o.proto.Options +} + +func (o *oneofDescriptor) IsSynthetic() bool { + for _, fld := range o.parent.proto.GetField() { + if fld.OneofIndex != nil && int(fld.GetOneofIndex()) == o.index { + return fld.GetProto3Optional() + } + } + return false // NB: we should never get here +} + +func (o *oneofDescriptor) Fields() protoreflect.FieldDescriptors { + return &o.fields +} + +type svcDescriptors struct { + protoreflect.ServiceDescriptors + svcs []*svcDescriptor +} + +func (r *result) createServices(prefix string, svcProtos []*descriptorpb.ServiceDescriptorProto) svcDescriptors { + svcs := make([]*svcDescriptor, len(svcProtos)) + for i, svcProto := range svcProtos { + svcs[i] = r.createServiceDescriptor(svcProto, i, prefix+svcProto.GetName()) + } + return svcDescriptors{svcs: svcs} +} + +func (s *svcDescriptors) Len() int { + return len(s.svcs) +} + +func (s *svcDescriptors) Get(i int) protoreflect.ServiceDescriptor { + return s.svcs[i] +} + +func (s *svcDescriptors) ByName(n protoreflect.Name) protoreflect.ServiceDescriptor { + for _, svc := range s.svcs { + if svc.Name() == n { + return svc + } + } + return nil +} + +type svcDescriptor struct { + protoreflect.ServiceDescriptor + file *result + index int + proto *descriptorpb.ServiceDescriptorProto + fqn string + + methods mtdDescriptors +} + +var _ protoreflect.ServiceDescriptor = (*svcDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*svcDescriptor)(nil) + +func (r *result) createServiceDescriptor(sd *descriptorpb.ServiceDescriptorProto, index int, fqn string) *svcDescriptor { + ret := &svcDescriptor{file: r, index: index, proto: sd, fqn: fqn} + r.descriptors[fqn] = ret + + prefix := fqn + "." 
+ ret.methods = r.createMethods(prefix, ret, sd.Method) + + return ret +} + +func (s *svcDescriptor) ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto { + return s.proto +} + +func (s *svcDescriptor) AsProto() proto.Message { + return s.proto +} + +func (s *svcDescriptor) ParentFile() protoreflect.FileDescriptor { + return s.file +} + +func (s *svcDescriptor) Parent() protoreflect.Descriptor { + return s.file +} + +func (s *svcDescriptor) Index() int { + return s.index +} + +func (s *svcDescriptor) Syntax() protoreflect.Syntax { + return s.file.Syntax() +} + +func (s *svcDescriptor) Name() protoreflect.Name { + return protoreflect.Name(s.proto.GetName()) +} + +func (s *svcDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(s.fqn) +} + +func (s *svcDescriptor) IsPlaceholder() bool { + return false +} + +func (s *svcDescriptor) Options() protoreflect.ProtoMessage { + return s.proto.Options +} + +func (s *svcDescriptor) Methods() protoreflect.MethodDescriptors { + return &s.methods +} + +type mtdDescriptors struct { + protoreflect.MethodDescriptors + mtds []*mtdDescriptor +} + +func (r *result) createMethods(prefix string, parent *svcDescriptor, mtdProtos []*descriptorpb.MethodDescriptorProto) mtdDescriptors { + mtds := make([]*mtdDescriptor, len(mtdProtos)) + for i, mtdProto := range mtdProtos { + mtds[i] = r.createMethodDescriptor(mtdProto, parent, i, prefix+mtdProto.GetName()) + } + return mtdDescriptors{mtds: mtds} +} + +func (m *mtdDescriptors) Len() int { + return len(m.mtds) +} + +func (m *mtdDescriptors) Get(i int) protoreflect.MethodDescriptor { + return m.mtds[i] +} + +func (m *mtdDescriptors) ByName(n protoreflect.Name) protoreflect.MethodDescriptor { + for _, mtd := range m.mtds { + if mtd.Name() == n { + return mtd + } + } + return nil +} + +type mtdDescriptor struct { + protoreflect.MethodDescriptor + file *result + parent *svcDescriptor + index int + proto *descriptorpb.MethodDescriptorProto + fqn string + + inputType, outputType protoreflect.MessageDescriptor +} + +var _ protoreflect.MethodDescriptor = (*mtdDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*mtdDescriptor)(nil) + +func (r *result) createMethodDescriptor(mtd *descriptorpb.MethodDescriptorProto, parent *svcDescriptor, index int, fqn string) *mtdDescriptor { + ret := &mtdDescriptor{file: r, parent: parent, index: index, proto: mtd, fqn: fqn} + r.descriptors[fqn] = ret + return ret +} + +func (m *mtdDescriptor) MethodDescriptorProto() *descriptorpb.MethodDescriptorProto { + return m.proto +} + +func (m *mtdDescriptor) AsProto() proto.Message { + return m.proto +} + +func (m *mtdDescriptor) ParentFile() protoreflect.FileDescriptor { + return m.file +} + +func (m *mtdDescriptor) Parent() protoreflect.Descriptor { + return m.parent +} + +func (m *mtdDescriptor) Index() int { + return m.index +} + +func (m *mtdDescriptor) Syntax() protoreflect.Syntax { + return m.file.Syntax() +} + +func (m *mtdDescriptor) Name() protoreflect.Name { + return protoreflect.Name(m.proto.GetName()) +} + +func (m *mtdDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(m.fqn) +} + +func (m *mtdDescriptor) IsPlaceholder() bool { + return false +} + +func (m *mtdDescriptor) Options() protoreflect.ProtoMessage { + return m.proto.Options +} + +func (m *mtdDescriptor) Input() protoreflect.MessageDescriptor { + return m.inputType +} + +func (m *mtdDescriptor) Output() protoreflect.MessageDescriptor { + return m.outputType +} + +func (m *mtdDescriptor) IsStreamingClient() 
bool {
+	return m.proto.GetClientStreaming()
+}
+
+func (m *mtdDescriptor) IsStreamingServer() bool {
+	return m.proto.GetServerStreaming()
+}
+
+func (r *result) FindImportByPath(path string) File {
+	return r.deps.FindFileByPath(path)
+}
+
+func (r *result) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor {
+	return findExtension(r, msg, tag)
+}
+
+func (r *result) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor {
+	fqn := strings.TrimPrefix(string(name), ".")
+	return r.descriptors[fqn]
+}
+
+func (r *result) importsAsFiles() Files {
+	return r.deps
+}
+
+func (r *result) hasSource() bool {
+	n := r.FileNode()
+	_, ok := n.(*ast.FileNode)
+	return ok
+}
diff --git a/vendor/github.com/bufbuild/protocompile/linker/doc.go b/vendor/github.com/bufbuild/protocompile/linker/doc.go
new file mode 100644
index 00000000..455c2ef6
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/linker/doc.go
@@ -0,0 +1,48 @@
+// Copyright 2020-2022 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package linker contains logic and APIs related to linking a protobuf file.
+// The process of linking involves resolving all symbol references to the
+// referenced descriptor. The result of linking is a "rich" descriptor that
+// is more useful than just a descriptor proto since the links allow easy
+// traversal of a protobuf type schema and the relationships between elements.
+//
+// # Files
+//
+// This package uses an augmentation to protoreflect.FileDescriptor instances
+// in the form of the File interface. There are also factory functions for
+// promoting a FileDescriptor into a linker.File. This new interface provides
+// additional methods for resolving symbols in the file.
+//
+// This interface is not only the result of linking but also an input to the
+// linking process, as all dependencies of a file to be linked must be provided
+// in this form. The actual result of the Link function, a Result, is an even
+// broader interface than File: the linker.Result interface provides even more
+// functions, which are needed for subsequent compilation steps: interpreting
+// options and generating source code info.
+//
+// # Symbols
+//
+// This package has a type named Symbols which represents a symbol table. This
+// is usually an internal detail when linking, but callers can provide an
+// instance so that symbols across multiple compile/link operations all have
+// access to the same table. This allows for detection of cases where multiple
+// files try to declare elements with conflicting fully-qualified names or
+// declare extensions for a particular extendable message that have conflicting
+// tag numbers.
+//
+// The calling code simply uses the same Symbols instance across all compile
+// operations, and if any files processed have such conflicts, they can be
+// reported.
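+//
+// # Example
+//
+// As a rough, illustrative sketch only: given a parser.Result named parsed,
+// a Files value deps holding its linked dependencies, and a *reporter.Handler
+// named handler (all assumed to be constructed elsewhere), linking looks like:
+//
+//	var symbols Symbols
+//	linked, err := Link(parsed, deps, &symbols, handler)
+//	if err != nil {
+//		// link errors were reported via handler
+//	}
+//	// linked is a Result, which is also a protoreflect.FileDescriptor
+//	_ = linked.Path()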
+package linker
diff --git a/vendor/github.com/bufbuild/protocompile/linker/files.go b/vendor/github.com/bufbuild/protocompile/linker/files.go
new file mode 100644
index 00000000..3acdcade
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/linker/files.go
@@ -0,0 +1,376 @@
+// Copyright 2020-2022 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linker
+
+import (
+	"fmt"
+	"strings"
+
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/dynamicpb"
+
+	"github.com/bufbuild/protocompile/walk"
+)
+
+// File is like a super-powered protoreflect.FileDescriptor. It includes helpful
+// methods for looking up elements in the descriptor and can be used to create a
+// resolver for all of the file's transitive closure of dependencies. (See
+// ResolverFromFile.)
+type File interface {
+	protoreflect.FileDescriptor
+	// FindDescriptorByName returns the given named element that is defined in
+	// this file. If no such element exists, nil is returned.
+	FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor
+	// FindImportByPath returns the File corresponding to the given import path.
+	// If this file does not import the given path, nil is returned.
+	FindImportByPath(path string) File
+	// FindExtensionByNumber returns the extension descriptor for the given tag
+	// that extends the given message name. If no such extension is defined in this
+	// file, nil is returned.
+	FindExtensionByNumber(message protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor
+	// importsAsFiles returns this file's imports. These are only the files
+	// directly imported by the file. Indirect transitive dependencies will not
+	// be in the returned slice.
+	importsAsFiles() Files
+}
+
+// NewFile converts a protoreflect.FileDescriptor to a File. The given deps must
+// contain all dependencies/imports of f. Also see NewFileRecursive.
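+//
+// A hedged sketch of typical usage, assuming fd is a protoreflect.FileDescriptor
+// and deps already contains a File for each of its imports (the name
+// "foo.bar.Baz" is purely hypothetical):
+//
+//	f, err := NewFile(fd, deps)
+//	if err != nil {
+//		// a dependency was missing or the file contained duplicate symbols
+//	}
+//	d := f.FindDescriptorByName("foo.bar.Baz") // nil if not defined in f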
+func NewFile(f protoreflect.FileDescriptor, deps Files) (File, error) { + if asFile, ok := f.(File); ok { + return asFile, nil + } + checkedDeps := make(Files, f.Imports().Len()) + for i := 0; i < f.Imports().Len(); i++ { + imprt := f.Imports().Get(i) + dep := deps.FindFileByPath(imprt.Path()) + if dep == nil { + return nil, fmt.Errorf("cannot create File for %q: missing dependency for %q", f.Path(), imprt.Path()) + } + checkedDeps[i] = dep + } + return newFile(f, checkedDeps) +} + +func newFile(f protoreflect.FileDescriptor, deps Files) (File, error) { + descs := map[protoreflect.FullName]protoreflect.Descriptor{} + err := walk.Descriptors(f, func(d protoreflect.Descriptor) error { + if _, ok := descs[d.FullName()]; ok { + return fmt.Errorf("file %q contains multiple elements with the name %s", f.Path(), d.FullName()) + } + descs[d.FullName()] = d + return nil + }) + if err != nil { + return nil, err + } + return &file{ + FileDescriptor: f, + descs: descs, + deps: deps, + }, nil +} + +// NewFileRecursive recursively converts a protoreflect.FileDescriptor to a File. +// If f has any dependencies/imports, they are converted, too, including any and +// all transitive dependencies. +// +// If f already implements File, it is returned unchanged. +func NewFileRecursive(f protoreflect.FileDescriptor) (File, error) { + if asFile, ok := f.(File); ok { + return asFile, nil + } + return newFileRecursive(f, map[protoreflect.FileDescriptor]File{}) +} + +func newFileRecursive(fd protoreflect.FileDescriptor, seen map[protoreflect.FileDescriptor]File) (File, error) { + if res, ok := seen[fd]; ok { + if res == nil { + return nil, fmt.Errorf("import cycle encountered: file %s transitively imports itself", fd.Path()) + } + return res, nil + } + + if f, ok := fd.(File); ok { + seen[fd] = f + return f, nil + } + + seen[fd] = nil + deps := make([]File, fd.Imports().Len()) + for i := 0; i < fd.Imports().Len(); i++ { + imprt := fd.Imports().Get(i) + dep, err := newFileRecursive(imprt, seen) + if err != nil { + return nil, err + } + deps[i] = dep + } + + f, err := newFile(fd, deps) + if err != nil { + return nil, err + } + seen[fd] = f + return f, nil +} + +type file struct { + protoreflect.FileDescriptor + descs map[protoreflect.FullName]protoreflect.Descriptor + deps Files +} + +func (f *file) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor { + return f.descs[name] +} + +func (f *file) FindImportByPath(path string) File { + return f.deps.FindFileByPath(path) +} + +func (f *file) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { + return findExtension(f, msg, tag) +} + +func (f *file) importsAsFiles() Files { + return f.deps +} + +var _ File = (*file)(nil) + +// Files represents a set of protobuf files. It is a slice of File values, but +// also provides a method for easily looking up files by path and name. +type Files []File + +// FindFileByPath finds a file in f that has the given path and name. If f +// contains no such file, nil is returned. +func (f Files) FindFileByPath(path string) File { + for _, file := range f { + if file.Path() == path { + return file + } + } + return nil +} + +// AsResolver returns a Resolver that uses f as the source of descriptors. If +// a given query cannot be answered with the files in f, the query will fail +// with a protoregistry.NotFound error. The implementation just delegates calls +// to each file until a result is found. +// +// Also see ResolverFromFile. 
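+//
+// For illustration, assuming files is a Files value assembled elsewhere (the
+// message name below is hypothetical):
+//
+//	res := files.AsResolver()
+//	mt, err := res.FindMessageByName("foo.bar.Baz")
+//	if errors.Is(err, protoregistry.NotFound) {
+//		// none of the files defines that message
+//	}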
+func (f Files) AsResolver() Resolver { + return filesResolver(f) +} + +// Resolver is an interface that can resolve various kinds of queries about +// descriptors. It satisfies the resolver interfaces defined in protodesc +// and protoregistry packages. +type Resolver interface { + protodesc.Resolver + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver +} + +// ResolverFromFile returns a Resolver that uses the given file plus all of its +// imports as the source of descriptors. If a given query cannot be answered with +// these files, the query will fail with a protoregistry.NotFound error. This +// does not recursively search the entire transitive closure; it only searches +// the given file and its immediate dependencies. This is useful for resolving +// elements visible to the file. +// +// If the given file is the result of a call to Link, then all dependencies +// provided in the call to Link are searched (which could actually include more +// than just the file's direct imports). +// +// Note that this function does not compute any additional indexes for efficient +// search, so queries generally take linear time, O(n) where n is the number of +// files in the transitive closure of the given file. Queries for an extension +// by number are linear with the number of messages and extensions defined across +// all the files. +func ResolverFromFile(f File) Resolver { + return fileResolver{ + f: f, + deps: f.importsAsFiles().AsResolver(), + } +} + +type fileResolver struct { + f File + deps Resolver +} + +func (r fileResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + if r.f.Path() == path { + return r.f, nil + } + return r.deps.FindFileByPath(path) +} + +func (r fileResolver) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + d := r.f.FindDescriptorByName(name) + if d != nil { + return d, nil + } + return r.deps.FindDescriptorByName(name) +} + +func (r fileResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + d := r.f.FindDescriptorByName(message) + if d != nil { + if md, ok := d.(protoreflect.MessageDescriptor); ok { + return dynamicpb.NewMessageType(md), nil + } + return nil, protoregistry.NotFound + } + return r.deps.FindMessageByName(message) +} + +func (r fileResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + fullName := messageNameFromURL(url) + return r.FindMessageByName(protoreflect.FullName(fullName)) +} + +func messageNameFromURL(url string) string { + lastSlash := strings.LastIndexByte(url, '/') + var fullName string + if lastSlash >= 0 { + fullName = url[lastSlash+1:] + } else { + fullName = url + } + return fullName +} + +func (r fileResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + d := r.f.FindDescriptorByName(field) + if d != nil { + if extd, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extd.Type(), nil + } + if fld, ok := d.(protoreflect.FieldDescriptor); ok && fld.IsExtension() { + return dynamicpb.NewExtensionType(fld), nil + } + return nil, protoregistry.NotFound + } + return r.deps.FindExtensionByName(field) +} + +func (r fileResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + ext := findExtension(r.f, message, field) + if ext != nil { + return ext.Type(), nil + } + return r.deps.FindExtensionByNumber(message, field) +} + +type filesResolver []File + +func (r 
filesResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + for _, f := range r { + if f.Path() == path { + return f, nil + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + for _, f := range r { + result := f.FindDescriptorByName(name) + if result != nil { + return result, nil + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + for _, f := range r { + d := f.FindDescriptorByName(message) + if d != nil { + if md, ok := d.(protoreflect.MessageDescriptor); ok { + return dynamicpb.NewMessageType(md), nil + } + return nil, protoregistry.NotFound + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + name := messageNameFromURL(url) + return r.FindMessageByName(protoreflect.FullName(name)) +} + +func (r filesResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + for _, f := range r { + d := f.FindDescriptorByName(field) + if d != nil { + if extd, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extd.Type(), nil + } + if fld, ok := d.(protoreflect.FieldDescriptor); ok && fld.IsExtension() { + return dynamicpb.NewExtensionType(fld), nil + } + return nil, protoregistry.NotFound + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + for _, f := range r { + ext := findExtension(f, message, field) + if ext != nil { + return ext.Type(), nil + } + } + return nil, protoregistry.NotFound +} + +type hasExtensionsAndMessages interface { + Messages() protoreflect.MessageDescriptors + Extensions() protoreflect.ExtensionDescriptors +} + +func findExtension(d hasExtensionsAndMessages, message protoreflect.FullName, field protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { + for i := 0; i < d.Extensions().Len(); i++ { + if extType := isExtensionMatch(d.Extensions().Get(i), message, field); extType != nil { + return extType + } + } + + for i := 0; i < d.Messages().Len(); i++ { + if extType := findExtension(d.Messages().Get(i), message, field); extType != nil { + return extType + } + } + + return nil // could not be found +} + +func isExtensionMatch(ext protoreflect.ExtensionDescriptor, message protoreflect.FullName, field protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { + if ext.Number() != field || ext.ContainingMessage().FullName() != message { + return nil + } + if extType, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok { + return extType + } + return dynamicpb.NewExtensionType(ext).TypeDescriptor() +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/linker.go b/vendor/github.com/bufbuild/protocompile/linker/linker.go new file mode 100644 index 00000000..d8d1c9a6 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/linker.go @@ -0,0 +1,183 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/reporter" +) + +// Link handles linking a parsed descriptor proto into a fully-linked descriptor. +// If the given parser.Result has imports, they must all be present in the given +// dependencies. +// +// The symbols value is optional and may be nil. If it is not nil, it must be the +// same instance used to create and link all of the given result's dependencies +// (or otherwise already have all dependencies imported). Otherwise, linking may +// fail with spurious errors resolving symbols. +// +// The handler value is used to report any link errors. If any such errors are +// reported, this function returns a non-nil error. The Result value returned +// also implements protoreflect.FileDescriptor. +// +// Note that linking does NOT interpret options. So options messages in the +// returned value have all values stored in UninterpretedOptions fields. +func Link(parsed parser.Result, dependencies Files, symbols *Symbols, handler *reporter.Handler) (Result, error) { + if symbols == nil { + symbols = &Symbols{} + } + prefix := parsed.FileDescriptorProto().GetPackage() + if prefix != "" { + prefix += "." + } + + for _, imp := range parsed.FileDescriptorProto().Dependency { + dep := dependencies.FindFileByPath(imp) + if dep == nil { + return nil, fmt.Errorf("dependencies is missing import %q", imp) + } + if err := symbols.Import(dep, handler); err != nil { + return nil, err + } + } + + r := &result{ + Result: parsed, + deps: dependencies, + descriptors: map[string]protoreflect.Descriptor{}, + usedImports: map[string]struct{}{}, + prefix: prefix, + optionQualifiedNames: map[ast.IdentValueNode]string{}, + } + + // First, we put all symbols into a single pool, which lets us ensure there + // are no duplicate symbols and will also let us resolve and revise all type + // references in the next step. + if err := symbols.importResult(r, handler); err != nil { + return nil, err + } + + // After we've populated the pool, we can now try to resolve all type + // references. All references must be checked for correct type, any fields + // with enum types must be corrected (since we parse them as if they are + // message references, as we don't actually know whether a name refers to a + // message or an enum until link time), and references will be re-written to + // be fully-qualified references (e.g. start with a dot "."). + if err := r.resolveReferences(handler, symbols); err != nil { + return nil, err + } + + return r, handler.Error() +} + +// Result is the result of linking. This is a protoreflect.FileDescriptor, but +// with some additional methods for exposing additional information, such as +// those for accessing the input AST or file descriptor. +// +// It also provides Resolve* methods, for looking up enums, messages, and +// extensions that are available to the protobuf source file this result +// represents.
An element is "available" if it meets any of the following +// criteria: +// 1. The element is defined in this file itself. +// 2. The element is defined in a file that is directly imported by this file. +// 3. The element is "available" to a file that is directly imported by this +// file as a public import. +// +// Other elements, even if in the transitive closure of this file, are not +// available and thus won't be returned by these methods. +type Result interface { + File + parser.Result + // ResolveEnumType returns an enum descriptor for the given named enum that + // is available in this file. If no such element is available or if the + // named element is not an enum, nil is returned. + ResolveEnumType(protoreflect.FullName) protoreflect.EnumDescriptor + // ResolveMessageType returns a message descriptor for the given named + // message that is available in this file. If no such element is available + // or if the named element is not a message, nil is returned. + ResolveMessageType(protoreflect.FullName) protoreflect.MessageDescriptor + // ResolveOptionsType returns a message descriptor for the given options + // type. This is like ResolveMessageType but searches the result's entire + // set of transitive dependencies without regard for visibility. If no + // such element is available or if the named element is not a message, nil + // is returned. + ResolveOptionsType(protoreflect.FullName) protoreflect.MessageDescriptor + // ResolveExtension returns an extension descriptor for the given named + // extension that is available in this file. If no such element is available + // or if the named element is not an extension, nil is returned. + ResolveExtension(protoreflect.FullName) protoreflect.ExtensionTypeDescriptor + // ResolveMessageLiteralExtensionName returns the fully qualified name for + // an identifier for extension field names in message literals. + ResolveMessageLiteralExtensionName(ast.IdentValueNode) string + // ValidateOptions runs some validation checks on the descriptor that can only + // be done after options are interpreted. Any errors or warnings encountered + // will be reported via the given handler. If any error is reported, this + // function returns a non-nil error. + ValidateOptions(handler *reporter.Handler) error + // CheckForUnusedImports is used to report warnings for unused imports. This + // should be called after options have been interpreted. Otherwise, the logic + // could incorrectly report imports as unused if the only symbol used were a + // custom option. + CheckForUnusedImports(handler *reporter.Handler) + // PopulateSourceCodeInfo is used to populate source code info for the file + // descriptor. This step requires that the underlying descriptor proto have + // its `source_code_info` field populated. This is typically a post-process + // step separate from linking, because computing source code info requires + // interpreting options (which is done after linking). + PopulateSourceCodeInfo() + + // CanonicalProto returns the file descriptor proto in a form that + // will be serialized in a canonical way. The "canonical" way matches + // the way that "protoc" emits option values, which is a way that + // mostly matches the way options are defined in source, including + // ordering and de-structuring. Unlike the FileDescriptorProto() method, this + // method is more expensive and results in a new descriptor proto + // being constructed with each call. 
+ // + // The returned value will have all options (fields of the various + // descriptorpb.*Options message types) represented via unrecognized + // fields. So the returned value will serialize as desired, but it + // is otherwise not useful since all option values are treated as + // unknown. + CanonicalProto() *descriptorpb.FileDescriptorProto + + // RemoveAST drops the AST information from this result. + RemoveAST() +} + +// ErrorUnusedImport may be passed to a warning reporter when an unused +// import is detected. The error the reporter receives will be wrapped +// with source position that indicates the file and line where the import +// statement appeared. +type ErrorUnusedImport interface { + error + UnusedImport() string +} + +type errUnusedImport string + +func (e errUnusedImport) Error() string { + return fmt.Sprintf("import %q not used", string(e)) +} + +func (e errUnusedImport) UnusedImport() string { + return string(e) +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go b/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go new file mode 100644 index 00000000..89475e69 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go @@ -0,0 +1,35 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build appengine || gopherjs || purego +// +build appengine gopherjs purego + +// NB: other environments where unsafe is inappropriate should use "purego" build tag +// https://github.com/golang/go/issues/23172 + +package linker + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func pathKey(p protoreflect.SourcePath) interface{} { + rv := reflect.ValueOf(p) + arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem()) + array := reflect.New(arrayType).Elem() + reflect.Copy(array, rv) + return array.Interface() +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go b/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go new file mode 100644 index 00000000..cf0d0c26 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go @@ -0,0 +1,36 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
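+// Both pathKey implementations (the purego variant above and the unsafe one
+// below) exist to make a protoreflect.SourcePath usable as a map key: the
+// path is an []int32 slice, which is not comparable, so it is converted into
+// an equivalent fixed-size array value, which is. For example,
+// SourcePath{4, 0, 2, 1} becomes a [4]int32 value. The unsafe variant avoids
+// copying the slice by reinterpreting its backing array in place.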
+ +//go:build !appengine && !gopherjs && !purego +// +build !appengine,!gopherjs,!purego + +// NB: other environments where unsafe is inappropriate should use "purego" build tag +// https://github.com/golang/go/issues/23172 + +package linker + +import ( + "reflect" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +var pathElementType = reflect.TypeOf(protoreflect.SourcePath{}).Elem() + +func pathKey(p protoreflect.SourcePath) interface{} { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(reflect.ValueOf(&p).Pointer())) + array := reflect.NewAt(reflect.ArrayOf(hdr.Len, pathElementType), unsafe.Pointer(hdr.Data)) + return array.Elem().Interface() +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/resolve.go b/vendor/github.com/bufbuild/protocompile/linker/resolve.go new file mode 100644 index 00000000..485aba05 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/resolve.go @@ -0,0 +1,825 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/walk" +) + +func (r *result) ResolveMessageType(name protoreflect.FullName) protoreflect.MessageDescriptor { + d := r.resolveElement(name) + if md, ok := d.(protoreflect.MessageDescriptor); ok { + return md + } + return nil +} + +func (r *result) ResolveOptionsType(name protoreflect.FullName) protoreflect.MessageDescriptor { + d, _ := ResolverFromFile(r).FindDescriptorByName(name) + md, _ := d.(protoreflect.MessageDescriptor) + if md != nil && md.ParentFile() != nil { + r.markUsed(md.ParentFile().Path()) + } + return md +} + +func (r *result) ResolveEnumType(name protoreflect.FullName) protoreflect.EnumDescriptor { + d := r.resolveElement(name) + if ed, ok := d.(protoreflect.EnumDescriptor); ok { + return ed + } + return nil +} + +func (r *result) ResolveExtension(name protoreflect.FullName) protoreflect.ExtensionTypeDescriptor { + d := r.resolveElement(name) + if ed, ok := d.(protoreflect.ExtensionDescriptor); ok { + if !ed.IsExtension() { + return nil + } + if td, ok := ed.(protoreflect.ExtensionTypeDescriptor); ok { + return td + } + return dynamicpb.NewExtensionType(ed).TypeDescriptor() + } + return nil +} + +func (r *result) ResolveMessageLiteralExtensionName(node ast.IdentValueNode) string { + return r.optionQualifiedNames[node] +} + +func (r *result) resolveElement(name protoreflect.FullName) protoreflect.Descriptor { + if len(name) > 0 && name[0] == '.' 
{ + name = name[1:] + } + importedFd, res := resolveElement(r, name, false, nil) + if importedFd != nil { + r.markUsed(importedFd.Path()) + } + return res +} + +func (r *result) markUsed(importPath string) { + r.usedImports[importPath] = struct{}{} +} + +func (r *result) CheckForUnusedImports(handler *reporter.Handler) { + fd := r.FileDescriptorProto() + file, _ := r.FileNode().(*ast.FileNode) + for i, dep := range fd.Dependency { + if _, ok := r.usedImports[dep]; !ok { + isPublic := false + // it's fine if it's a public import + for _, j := range fd.PublicDependency { + if i == int(j) { + isPublic = true + break + } + } + if isPublic { + continue + } + pos := ast.UnknownPos(fd.GetName()) + if file != nil { + for _, decl := range file.Decls { + imp, ok := decl.(*ast.ImportNode) + if ok && imp.Name.AsString() == dep { + pos = file.NodeInfo(imp).Start() + } + } + } + handler.HandleWarningWithPos(pos, errUnusedImport(dep)) + } + } +} + +func resolveElement(f File, fqn protoreflect.FullName, publicImportsOnly bool, checked []string) (imported File, d protoreflect.Descriptor) { + path := f.Path() + for _, str := range checked { + if str == path { + // already checked + return nil, nil + } + } + checked = append(checked, path) + + r := resolveElementInFile(fqn, f) + if r != nil { + // not imported, but present in f + return nil, r + } + + // When publicImportsOnly = false, we are searching only directly imported symbols. But + // we also need to search transitive public imports due to semantics of public imports. + for i := 0; i < f.Imports().Len(); i++ { + dep := f.Imports().Get(i) + if dep.IsPublic || !publicImportsOnly { + depFile := f.FindImportByPath(dep.Path()) + _, d := resolveElement(depFile, fqn, true, checked) + if d != nil { + return depFile, d + } + } + } + + return nil, nil +} + +func descriptorTypeWithArticle(d protoreflect.Descriptor) string { + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return "a message" + case protoreflect.FieldDescriptor: + if d.IsExtension() { + return "an extension" + } + return "a field" + case protoreflect.OneofDescriptor: + return "a oneof" + case protoreflect.EnumDescriptor: + return "an enum" + case protoreflect.EnumValueDescriptor: + return "an enum value" + case protoreflect.ServiceDescriptor: + return "a service" + case protoreflect.MethodDescriptor: + return "a method" + case protoreflect.FileDescriptor: + return "a file" + default: + // shouldn't be possible + return fmt.Sprintf("a %T", d) + } +} + +func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error { + // first create the full descriptor hierarchy + fd := r.FileDescriptorProto() + prefix := "" + if fd.GetPackage() != "" { + prefix = fd.GetPackage() + "." + } + r.imports = r.createImports() + r.messages = r.createMessages(prefix, r, fd.MessageType) + r.enums = r.createEnums(prefix, r, fd.EnumType) + r.extensions = r.createExtensions(prefix, r, fd.Extension) + r.services = r.createServices(prefix, fd.Service) + + // then resolve symbol references + scopes := []scope{fileScope(r)} + if fd.Options != nil { + if err := r.resolveOptions(handler, "file", protoreflect.FullName(fd.GetName()), fd.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + + return walk.DescriptorsEnterAndExit(r, + func(d protoreflect.Descriptor) error { + fqn := d.FullName() + switch d := d.(type) { + case *msgDescriptor: + // Strangely, when protoc resolves extension names, it uses the *enclosing* scope + // instead of the message's scope. 
So if the message contains an extension named "i", + // an option cannot refer to it as simply "i" but must qualify it (at a minimum "Msg.i"). + // So we don't add this message's scope to our scopes slice until *after* we do options. + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "message", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + scopes = append(scopes, messageScope(r, fqn)) // push new scope on entry + // walk only visits descriptors, so we need to loop over extension ranges ourselves + for _, er := range d.proto.ExtensionRange { + if er.Options != nil { + erName := protoreflect.FullName(fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1)) + if err := r.resolveOptions(handler, "extension range", erName, er.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + } + case *extTypeDescriptor: + if d.field.proto.Options != nil { + if err := r.resolveOptions(handler, "extension", fqn, d.field.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + if err := resolveFieldTypes(d.field, handler, s, scopes); err != nil { + return err + } + if r.Syntax() == protoreflect.Proto3 && !allowedProto3Extendee(d.field.proto.GetExtendee()) { + file := r.FileNode() + node := r.FieldNode(d.field.proto).FieldExtendee() + if err := handler.HandleErrorf(file.NodeInfo(node).Start(), "extend blocks in proto3 can only be used to define custom options"); err != nil { + return err + } + } + case *fldDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "field", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + if err := resolveFieldTypes(d, handler, s, scopes); err != nil { + return err + } + case *oneofDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "oneof", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + case *enumDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "enum", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + case *enValDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "enum value", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + case *svcDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "service", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + // not a message, but same scoping rules for nested elements as if it were + scopes = append(scopes, messageScope(r, fqn)) // push new scope on entry + case *mtdDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "method", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { + return err + } + } + if err := resolveMethodTypes(d, handler, scopes); err != nil { + return err + } + } + return nil + }, + func(d protoreflect.Descriptor) error { + switch d.(type) { + case protoreflect.MessageDescriptor, protoreflect.ServiceDescriptor: + // pop message scope on exit + scopes = scopes[:len(scopes)-1] + } + return nil + }) +} + +var allowedProto3Extendees = map[string]struct{}{ + ".google.protobuf.FileOptions": {}, + ".google.protobuf.MessageOptions": {}, + ".google.protobuf.FieldOptions": {}, + ".google.protobuf.OneofOptions": {}, + ".google.protobuf.ExtensionRangeOptions": {}, + ".google.protobuf.EnumOptions": {}, + ".google.protobuf.EnumValueOptions": {}, +
".google.protobuf.ServiceOptions": {}, + ".google.protobuf.MethodOptions": {}, +} + +func allowedProto3Extendee(n string) bool { + if n == "" { + // not an extension, allowed + return true + } + _, ok := allowedProto3Extendees[n] + return ok +} + +func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, s *Symbols, scopes []scope) error { + r := f.file + fld := f.proto + file := r.FileNode() + node := r.FieldNode(fld) + scope := fmt.Sprintf("field %s", f.fqn) + if fld.GetExtendee() != "" { + scope := fmt.Sprintf("extension %s", f.fqn) + dsc := r.resolve(fld.GetExtendee(), false, scopes) + if dsc == nil { + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()).Start(), "unknown extendee type %s", fld.GetExtendee()) + } + if isSentinelDescriptor(dsc) { + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()).Start(), "unknown extendee type %s; resolved to %s which is not defined; consider using a leading dot", fld.GetExtendee(), dsc.FullName()) + } + extd, ok := dsc.(protoreflect.MessageDescriptor) + if !ok { + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()).Start(), "extendee is invalid: %s is %s, not a message", dsc.FullName(), descriptorTypeWithArticle(dsc)) + } + f.extendee = extd + extendeeName := "." + string(dsc.FullName()) + if fld.GetExtendee() != extendeeName { + fld.Extendee = proto.String(extendeeName) + } + // make sure the tag number is in range + found := false + tag := protoreflect.FieldNumber(fld.GetNumber()) + for i := 0; i < extd.ExtensionRanges().Len(); i++ { + rng := extd.ExtensionRanges().Get(i) + if tag >= rng[0] && tag < rng[1] { + found = true + break + } + } + if !found { + if err := handler.HandleErrorf(file.NodeInfo(node.FieldTag()).Start(), "%s: tag %d is not in valid range for extended type %s", scope, tag, dsc.FullName()); err != nil { + return err + } + } else { + // make sure tag is not a duplicate + if err := s.AddExtension(packageFor(dsc), dsc.FullName(), tag, file.NodeInfo(node.FieldTag()).Start(), handler); err != nil { + return err + } + } + } else if f.proto.OneofIndex != nil { + parent := f.parent.(protoreflect.MessageDescriptor) //nolint:errcheck + index := int(f.proto.GetOneofIndex()) + f.oneof = parent.Oneofs().Get(index) + } + + if fld.GetTypeName() == "" { + // scalar type; no further resolution required + return nil + } + + dsc := r.resolve(fld.GetTypeName(), true, scopes) + if dsc == nil { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: unknown type %s", scope, fld.GetTypeName()) + } + if isSentinelDescriptor(dsc) { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: unknown type %s; resolved to %s which is not defined; consider using a leading dot", scope, fld.GetTypeName(), dsc.FullName()) + } + switch dsc := dsc.(type) { + case protoreflect.MessageDescriptor: + if dsc.IsMapEntry() { + isValid := false + switch node.(type) { + case *ast.MapFieldNode: + // We have an AST for this file and can see this field is from a map declaration + isValid = true + case ast.NoSourceNode: + // We don't have an AST for the file (it came from a provided descriptor). So we + // need to validate that it's not an illegal reference. To be valid, the field + // must be repeated and the entry type must be nested in the same enclosing + // message as the field. 
+ isValid = isValidMap(f, dsc) + if isValid && f.index > 0 { + // also make sure there are no earlier fields that are valid for this map entry + flds := f.Parent().(protoreflect.MessageDescriptor).Fields() + for i := 0; i < f.index; i++ { + if isValidMap(flds.Get(i), dsc) { + isValid = false + break + } + } + } + } + if !isValid { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: %s is a synthetic map entry and may not be referenced explicitly", scope, dsc.FullName()) + } + } + typeName := "." + string(dsc.FullName()) + if fld.GetTypeName() != typeName { + fld.TypeName = proto.String(typeName) + } + if fld.Type == nil { + // if type was tentatively unset, we now know it's actually a message + fld.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_GROUP { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: descriptor proto indicates type %v but should be %v", scope, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) + } + f.msgType = dsc + case protoreflect.EnumDescriptor: + proto3 := r.Syntax() == protoreflect.Proto3 + enumIsProto3 := dsc.Syntax() == protoreflect.Proto3 + if fld.GetExtendee() == "" && proto3 && !enumIsProto3 { + // fields in a proto3 message cannot refer to proto2 enums + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: cannot use proto2 enum %s in a proto3 message", scope, fld.GetTypeName()) + } + typeName := "." + string(dsc.FullName()) + if fld.GetTypeName() != typeName { + fld.TypeName = proto.String(typeName) + } + if fld.Type == nil { + // the type was tentatively unset, but now we know it's actually an enum + fld.Type = descriptorpb.FieldDescriptorProto_TYPE_ENUM.Enum() + } else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_ENUM { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: descriptor proto indicates type %v but should be %v", scope, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_ENUM) + } + f.enumType = dsc + default: + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: invalid type: %s is %s, not a message or enum", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)) + } + return nil +} + +func packageFor(dsc protoreflect.Descriptor) protoreflect.FullName { + if dsc.ParentFile() != nil { + return dsc.ParentFile().Package() + } + // Can't access package? Make a best effort guess. 
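+ // e.g. the parent of "foo.bar.Baz" is "foo.bar"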
+ return dsc.FullName().Parent() +} + +func isValidMap(mapField protoreflect.FieldDescriptor, mapEntry protoreflect.MessageDescriptor) bool { + return !mapField.IsExtension() && + mapEntry.Parent() == mapField.ContainingMessage() && + mapField.Cardinality() == protoreflect.Repeated && + string(mapEntry.Name()) == internal.InitCap(internal.JSONName(string(mapField.Name())))+"Entry" +} + +func resolveMethodTypes(m *mtdDescriptor, handler *reporter.Handler, scopes []scope) error { + scope := fmt.Sprintf("method %s", m.fqn) + r := m.file + mtd := m.proto + file := r.FileNode() + node := r.MethodNode(mtd) + dsc := r.resolve(mtd.GetInputType(), false, scopes) + if dsc == nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()).Start(), "%s: unknown request type %s", scope, mtd.GetInputType()); err != nil { + return err + } + } else if isSentinelDescriptor(dsc) { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()).Start(), "%s: unknown request type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetInputType(), dsc.FullName()); err != nil { + return err + } + } else if msg, ok := dsc.(protoreflect.MessageDescriptor); !ok { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()).Start(), "%s: invalid request type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { + return err + } + } else { + typeName := "." + string(dsc.FullName()) + if mtd.GetInputType() != typeName { + mtd.InputType = proto.String(typeName) + } + m.inputType = msg + } + + // TODO: make input and output type resolution more DRY + dsc = r.resolve(mtd.GetOutputType(), false, scopes) + if dsc == nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()).Start(), "%s: unknown response type %s", scope, mtd.GetOutputType()); err != nil { + return err + } + } else if isSentinelDescriptor(dsc) { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()).Start(), "%s: unknown response type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetOutputType(), dsc.FullName()); err != nil { + return err + } + } else if msg, ok := dsc.(protoreflect.MessageDescriptor); !ok { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()).Start(), "%s: invalid response type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { + return err + } + } else { + typeName := "." 
+ string(dsc.FullName()) + if mtd.GetOutputType() != typeName { + mtd.OutputType = proto.String(typeName) + } + m.outputType = msg + } + + return nil +} + +func (r *result) resolveOptions(handler *reporter.Handler, elemType string, elemName protoreflect.FullName, opts []*descriptorpb.UninterpretedOption, scopes []scope) error { + mc := &internal.MessageContext{ + File: r, + ElementName: string(elemName), + ElementType: elemType, + } + file := r.FileNode() +opts: + for _, opt := range opts { + // resolve any extension names found in option names + for _, nm := range opt.Name { + if nm.GetIsExtension() { + node := r.OptionNamePartNode(nm) + fqn, err := r.resolveExtensionName(nm.GetNamePart(), scopes) + if err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node).Start(), "%v%v", mc, err); err != nil { + return err + } + continue opts + } + nm.NamePart = proto.String(fqn) + } + } + // also resolve any extension names found inside message literals in option values + mc.Option = opt + optVal := r.OptionNode(opt).GetValue() + if err := r.resolveOptionValue(handler, mc, optVal, scopes); err != nil { + return err + } + mc.Option = nil + } + return nil +} + +func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.MessageContext, val ast.ValueNode, scopes []scope) error { + optVal := val.Value() + switch optVal := optVal.(type) { + case []ast.ValueNode: + origPath := mc.OptAggPath + defer func() { + mc.OptAggPath = origPath + }() + for i, v := range optVal { + mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, i) + if err := r.resolveOptionValue(handler, mc, v, scopes); err != nil { + return err + } + } + case []*ast.MessageFieldNode: + origPath := mc.OptAggPath + defer func() { + mc.OptAggPath = origPath + }() + for _, fld := range optVal { + // check for extension name + if fld.Name.IsExtension() { + // Confusingly, an extension reference inside a message literal cannot refer to + // elements in the same enclosing message without a qualifier. Basically, we + // treat this as if there were no message scopes, so only the package name is + // used for resolving relative references. (Inconsistent protoc behavior, but + // likely due to how it re-uses C++ text format implementation, and normal text + // format doesn't expect that kind of relative reference.) + scopes := scopes[:1] // first scope is file, the rest are enclosing messages + fqn, err := r.resolveExtensionName(string(fld.Name.Name.AsIdentifier()), scopes) + if err != nil { + if err := handler.HandleErrorf(r.FileNode().NodeInfo(fld.Name.Name).Start(), "%v%v", mc, err); err != nil { + return err + } + } else { + r.optionQualifiedNames[fld.Name.Name] = fqn + } + } + + // recurse into value + mc.OptAggPath = origPath + if origPath != "" { + mc.OptAggPath += "." 
+ } + if fld.Name.IsExtension() { + mc.OptAggPath = fmt.Sprintf("%s[%s]", mc.OptAggPath, string(fld.Name.Name.AsIdentifier())) + } else { + mc.OptAggPath = fmt.Sprintf("%s%s", mc.OptAggPath, string(fld.Name.Name.AsIdentifier())) + } + + if err := r.resolveOptionValue(handler, mc, fld.Val, scopes); err != nil { + return err + } + } + } + return nil +} + +func (r *result) resolveExtensionName(name string, scopes []scope) (string, error) { + dsc := r.resolve(name, false, scopes) + if dsc == nil { + return "", fmt.Errorf("unknown extension %s", name) + } + if isSentinelDescriptor(dsc) { + return "", fmt.Errorf("unknown extension %s; resolved to %s which is not defined; consider using a leading dot", name, dsc.FullName()) + } + if ext, ok := dsc.(protoreflect.FieldDescriptor); !ok { + return "", fmt.Errorf("invalid extension: %s is %s, not an extension", name, descriptorTypeWithArticle(dsc)) + } else if !ext.IsExtension() { + return "", fmt.Errorf("invalid extension: %s is a field but not an extension", name) + } + return string("." + dsc.FullName()), nil +} + +func (r *result) resolve(name string, onlyTypes bool, scopes []scope) protoreflect.Descriptor { + if strings.HasPrefix(name, ".") { + // already fully-qualified + return r.resolveElement(protoreflect.FullName(name[1:])) + } + // unqualified, so we look in the enclosing (last) scope first and move + // towards outermost (first) scope, trying to resolve the symbol + pos := strings.IndexByte(name, '.') + firstName := name + if pos > 0 { + firstName = name[:pos] + } + var bestGuess protoreflect.Descriptor + for i := len(scopes) - 1; i >= 0; i-- { + d := scopes[i](firstName, name) + if d != nil { + // In `protoc`, it will skip a match of the wrong type and move on + // to the next scope, but only if the reference is unqualified. So + // we mirror that behavior here. When we skip and move on, we go + // ahead and save the match of the wrong type so we can at least use + // it to construct a better error in the event that we don't find + // any match of the right type. + if !onlyTypes || isType(d) || firstName != name { + return d + } + if bestGuess == nil { + bestGuess = d + } + } + } + // we return best guess, even though it was not an allowed kind of + // descriptor, so caller can print a better error message (e.g. + // indicating that the name was found but that it's the wrong type) + return bestGuess +} + +func isType(d protoreflect.Descriptor) bool { + switch d.(type) { + case protoreflect.MessageDescriptor, protoreflect.EnumDescriptor: + return true + } + return false +} + +// scope represents a lexical scope in a proto file in which messages and enums +// can be declared. +type scope func(firstName, fullName string) protoreflect.Descriptor + +func fileScope(r *result) scope { + // we search symbols in this file, but also symbols in other files that have + // the same package as this file or a "parent" package (in protobuf, + // packages are a hierarchy like C++ namespaces) + prefixes := internal.CreatePrefixList(r.FileDescriptorProto().GetPackage()) + querySymbol := func(n string) protoreflect.Descriptor { + return r.resolveElement(protoreflect.FullName(n)) + } + return func(firstName, fullName string) protoreflect.Descriptor { + for _, prefix := range prefixes { + var n1, n string + if prefix == "" { + // exhausted all prefixes, so it must be in this one + n1, n = fullName, fullName + } else { + n = prefix + "." + fullName + n1 = prefix + "." 
+ firstName + } + d := resolveElementRelative(n1, n, querySymbol) + if d != nil { + return d + } + } + return nil + } +} + +func messageScope(r *result, messageName protoreflect.FullName) scope { + querySymbol := func(n string) protoreflect.Descriptor { + return resolveElementInFile(protoreflect.FullName(n), r) + } + return func(firstName, fullName string) protoreflect.Descriptor { + n1 := string(messageName) + "." + firstName + n := string(messageName) + "." + fullName + return resolveElementRelative(n1, n, querySymbol) + } +} + +func resolveElementRelative(firstName, fullName string, query func(name string) protoreflect.Descriptor) protoreflect.Descriptor { + d := query(firstName) + if d == nil { + return nil + } + if firstName == fullName { + return d + } + if !isAggregateDescriptor(d) { + // can't possibly find the rest of full name if + // the first name indicated a leaf descriptor + return nil + } + d = query(fullName) + if d == nil { + return newSentinelDescriptor(fullName) + } + return d +} + +func resolveElementInFile(name protoreflect.FullName, f File) protoreflect.Descriptor { + d := f.FindDescriptorByName(name) + if d != nil { + return d + } + + if matchesPkgNamespace(name, f.Package()) { + // this sentinel means the name is a valid namespace but + // does not refer to a descriptor + return newSentinelDescriptor(string(name)) + } + return nil +} + +func matchesPkgNamespace(fqn, pkg protoreflect.FullName) bool { + if pkg == "" { + return false + } + if fqn == pkg { + return true + } + if len(pkg) > len(fqn) && strings.HasPrefix(string(pkg), string(fqn)) { + // if char after fqn is a dot, then fqn is a namespace + if pkg[len(fqn)] == '.' { + return true + } + } + return false +} + +func isAggregateDescriptor(d protoreflect.Descriptor) bool { + if isSentinelDescriptor(d) { + // this indicates the name matched a package, not a + // descriptor, but a package is an aggregate, so + // we return true + return true + } + switch d.(type) { + case protoreflect.MessageDescriptor, protoreflect.EnumDescriptor, protoreflect.ServiceDescriptor: + return true + default: + return false + } +} + +func isSentinelDescriptor(d protoreflect.Descriptor) bool { + _, ok := d.(*sentinelDescriptor) + return ok +} + +func newSentinelDescriptor(name string) protoreflect.Descriptor { + return &sentinelDescriptor{name: name} +} + +// sentinelDescriptor is a placeholder descriptor. It is used instead of nil to +// distinguish between two situations: +// 1. The given name could not be found. +// 2. The given name *cannot* be a valid result so stop searching. +// +// In these cases, attempts to resolve an element name will return nil for the +// first case and will return a sentinelDescriptor in the second. The sentinel +// contains the fully-qualified name which caused the search to stop (which may +// be a prefix of the actual name being resolved). 
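+//
+// For example: in a file with package "foo.bar", an unqualified reference
+// "bar.Baz" is tried with the prefix "foo", and "foo.bar" matches the package
+// namespace. If "foo.bar.Baz" is not actually defined, the search yields a
+// sentinel named "foo.bar.Baz" instead of nil, stopping resolution there (and
+// producing a "resolved to foo.bar.Baz which is not defined" error) rather
+// than falling through to an outer scope.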
+type sentinelDescriptor struct { + protoreflect.Descriptor + name string +} + +func (p *sentinelDescriptor) ParentFile() protoreflect.FileDescriptor { + return nil +} + +func (p *sentinelDescriptor) Parent() protoreflect.Descriptor { + return nil +} + +func (p *sentinelDescriptor) Index() int { + return 0 +} + +func (p *sentinelDescriptor) Syntax() protoreflect.Syntax { + return 0 +} + +func (p *sentinelDescriptor) Name() protoreflect.Name { + return protoreflect.Name(p.name) +} + +func (p *sentinelDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(p.name) +} + +func (p *sentinelDescriptor) IsPlaceholder() bool { + return false +} + +func (p *sentinelDescriptor) Options() protoreflect.ProtoMessage { + return nil +} + +var _ protoreflect.Descriptor = (*sentinelDescriptor)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/linker/symbols.go b/vendor/github.com/bufbuild/protocompile/linker/symbols.go new file mode 100644 index 00000000..54923218 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/symbols.go @@ -0,0 +1,567 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "strings" + "sync" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/walk" +) + +const unknownFilePath = "" + +// Symbols is a symbol table that maps names for all program elements to their +// location in source. It also tracks extension tag numbers. This can be used +// to enforce uniqueness for symbol names and tag numbers across many files and +// many link operations. +// +// This type is thread-safe. +type Symbols struct { + pkgTrie packageSymbols +} + +type packageSymbols struct { + mu sync.RWMutex + children map[protoreflect.FullName]*packageSymbols + files map[protoreflect.FileDescriptor]struct{} + symbols map[protoreflect.FullName]symbolEntry + exts map[extNumber]ast.SourcePos +} + +type extNumber struct { + extendee protoreflect.FullName + tag protoreflect.FieldNumber +} + +type symbolEntry struct { + pos ast.SourcePos + isEnumValue bool + isPackage bool +} + +// Import populates the symbol table with all symbols/elements and extension +// tags present in the given file descriptor. If s is nil or if fd has already +// been imported into s, this returns immediately without doing anything. If any +// collisions in symbol names or extension tags are identified, an error will be +// returned and the symbol table will not be updated. 
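+//
+// A minimal usage sketch (illustrative only; the file descriptors and the
+// reporter are assumed to come from elsewhere):
+//
+//    var syms Symbols
+//    handler := reporter.NewHandler(nil)
+//    for _, fd := range fileDescriptors {
+//        if err := syms.Import(fd, handler); err != nil {
+//            // a symbol name or extension tag collision was detected
+//        }
+//    }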
+func (s *Symbols) Import(fd protoreflect.FileDescriptor, handler *reporter.Handler) error { + if s == nil { + return nil + } + + if f, ok := fd.(*file); ok { + // unwrap any file instance + fd = f.FileDescriptor + } + + var pkgPos ast.SourcePos + if res, ok := fd.(*result); ok { + pkgPos = packageNameStart(res) + } else { + pkgPos = sourcePositionForPackage(fd) + } + pkg, err := s.importPackages(pkgPos, fd.Package(), handler) + if err != nil || pkg == nil { + return err + } + + pkg.mu.RLock() + _, alreadyImported := pkg.files[fd] + pkg.mu.RUnlock() + + if alreadyImported { + return nil + } + + for i := 0; i < fd.Imports().Len(); i++ { + if err := s.Import(fd.Imports().Get(i).FileDescriptor, handler); err != nil { + return err + } + } + + if res, ok := fd.(*result); ok && res.hasSource() { + return s.importResultWithExtensions(pkg, res, handler) + } + + return s.importFileWithExtensions(pkg, fd, handler) +} + +func (s *Symbols) importFileWithExtensions(pkg *packageSymbols, fd protoreflect.FileDescriptor, handler *reporter.Handler) error { + imported, err := pkg.importFile(fd, handler) + if err != nil { + return err + } + if !imported { + // nothing else to do + return nil + } + + return walk.Descriptors(fd, func(d protoreflect.Descriptor) error { + fld, ok := d.(protoreflect.FieldDescriptor) + if !ok || !fld.IsExtension() { + return nil + } + pos := sourcePositionForNumber(fld) + extendee := fld.ContainingMessage() + if err := s.AddExtension(packageFor(extendee), extendee.FullName(), fld.Number(), pos, handler); err != nil { + return err + } + return nil + }) +} + +func (s *packageSymbols) importFile(fd protoreflect.FileDescriptor, handler *reporter.Handler) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.files[fd]; ok { + // have to double-check if it's already imported, in case + // it was added after above read-locked check + return false, nil + } + + // first pass: check for conflicts + if err := s.checkFileLocked(fd, handler); err != nil { + return false, err + } + if err := handler.Error(); err != nil { + return false, err + } + + // second pass: commit all symbols + s.commitFileLocked(fd) + + return true, nil +} + +func (s *Symbols) importPackages(pkgPos ast.SourcePos, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { + if pkg == "" { + return &s.pkgTrie, nil + } + + parts := strings.Split(string(pkg), ".") + for i := 1; i < len(parts); i++ { + parts[i] = parts[i-1] + "." 
+ parts[i] + } + + cur := &s.pkgTrie + for _, p := range parts { + var err error + cur, err = cur.importPackage(pkgPos, protoreflect.FullName(p), handler) + if err != nil { + return nil, err + } + if cur == nil { + return nil, nil + } + } + + return cur, nil +} + +func (s *packageSymbols) importPackage(pkgPos ast.SourcePos, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { + s.mu.RLock() + existing, ok := s.symbols[pkg] + var child *packageSymbols + if ok && existing.isPackage { + child = s.children[pkg] + } + s.mu.RUnlock() + + if ok && existing.isPackage { + // package already exists + return child, nil + } else if ok { + return nil, reportSymbolCollision(pkgPos, pkg, false, existing, handler) + } + + s.mu.Lock() + defer s.mu.Unlock() + // have to double-check in case it was added while upgrading to write lock + existing, ok = s.symbols[pkg] + if ok && existing.isPackage { + // package already exists + return s.children[pkg], nil + } else if ok { + return nil, reportSymbolCollision(pkgPos, pkg, false, existing, handler) + } + if s.symbols == nil { + s.symbols = map[protoreflect.FullName]symbolEntry{} + } + s.symbols[pkg] = symbolEntry{pos: pkgPos, isPackage: true} + child = &packageSymbols{} + if s.children == nil { + s.children = map[protoreflect.FullName]*packageSymbols{} + } + s.children[pkg] = child + return child, nil +} + +func (s *Symbols) getPackage(pkg protoreflect.FullName) *packageSymbols { + if pkg == "" { + return &s.pkgTrie + } + + parts := strings.Split(string(pkg), ".") + for i := 1; i < len(parts); i++ { + parts[i] = parts[i-1] + "." + parts[i] + } + + cur := &s.pkgTrie + for _, p := range parts { + cur.mu.RLock() + next := cur.children[protoreflect.FullName(p)] + cur.mu.RUnlock() + + if next == nil { + return nil + } + cur = next + } + + return cur +} + +func reportSymbolCollision(pos ast.SourcePos, fqn protoreflect.FullName, additionIsEnumVal bool, existing symbolEntry, handler *reporter.Handler) error { + // because of weird scoping for enum values, provide more context in error message + // if this conflict is with an enum value + var isPkg, suffix string + if additionIsEnumVal || existing.isEnumValue { + suffix = "; protobuf uses C++ scoping rules for enum values, so they exist in the scope enclosing the enum" + } + if existing.isPackage { + isPkg = " as a package" + } + orig := existing.pos + conflict := pos + if posLess(conflict, orig) { + orig, conflict = conflict, orig + } + return handler.HandleErrorf(conflict, "symbol %q already defined%s at %v%s", fqn, isPkg, orig, suffix) +} + +func posLess(a, b ast.SourcePos) bool { + if a.Filename == b.Filename { + if a.Line == b.Line { + return a.Col < b.Col + } + return a.Line < b.Line + } + return false +} + +func (s *packageSymbols) checkFileLocked(f protoreflect.FileDescriptor, handler *reporter.Handler) error { + return walk.Descriptors(f, func(d protoreflect.Descriptor) error { + pos := sourcePositionFor(d) + if existing, ok := s.symbols[d.FullName()]; ok { + _, isEnumVal := d.(protoreflect.EnumValueDescriptor) + if err := reportSymbolCollision(pos, d.FullName(), isEnumVal, existing, handler); err != nil { + return err + } + } + return nil + }) +} + +func sourcePositionForPackage(fd protoreflect.FileDescriptor) ast.SourcePos { + loc := fd.SourceLocations().ByPath([]int32{internal.FilePackageTag}) + if isZeroLoc(loc) { + return ast.UnknownPos(fd.Path()) + } + return ast.SourcePos{ + Filename: fd.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + } +} + +func sourcePositionFor(d 
protoreflect.Descriptor) ast.SourcePos { + file := d.ParentFile() + if file == nil { + return ast.UnknownPos(unknownFilePath) + } + path, ok := computePath(d) + if !ok { + return ast.UnknownPos(file.Path()) + } + namePath := path + switch d.(type) { + case protoreflect.FieldDescriptor: + namePath = append(namePath, internal.FieldNameTag) + case protoreflect.MessageDescriptor: + namePath = append(namePath, internal.MessageNameTag) + case protoreflect.OneofDescriptor: + namePath = append(namePath, internal.OneOfNameTag) + case protoreflect.EnumDescriptor: + namePath = append(namePath, internal.EnumNameTag) + case protoreflect.EnumValueDescriptor: + namePath = append(namePath, internal.EnumValNameTag) + case protoreflect.ServiceDescriptor: + namePath = append(namePath, internal.ServiceNameTag) + case protoreflect.MethodDescriptor: + namePath = append(namePath, internal.MethodNameTag) + default: + // NB: shouldn't really happen, but just in case fall back to path to + // descriptor, sans name field + } + loc := file.SourceLocations().ByPath(namePath) + if isZeroLoc(loc) { + loc = file.SourceLocations().ByPath(path) + if isZeroLoc(loc) { + return ast.UnknownPos(file.Path()) + } + } + return ast.SourcePos{ + Filename: file.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + } +} + +func sourcePositionForNumber(fd protoreflect.FieldDescriptor) ast.SourcePos { + file := fd.ParentFile() + if file == nil { + return ast.UnknownPos(unknownFilePath) + } + path, ok := computePath(fd) + if !ok { + return ast.UnknownPos(file.Path()) + } + numberPath := path + numberPath = append(numberPath, internal.FieldNumberTag) + loc := file.SourceLocations().ByPath(numberPath) + if isZeroLoc(loc) { + loc = file.SourceLocations().ByPath(path) + if isZeroLoc(loc) { + return ast.UnknownPos(file.Path()) + } + } + return ast.SourcePos{ + Filename: file.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + } +} + +func isZeroLoc(loc protoreflect.SourceLocation) bool { + return loc.Path == nil && + loc.StartLine == 0 && + loc.StartColumn == 0 && + loc.EndLine == 0 && + loc.EndColumn == 0 +} + +func (s *packageSymbols) commitFileLocked(f protoreflect.FileDescriptor) { + if s.symbols == nil { + s.symbols = map[protoreflect.FullName]symbolEntry{} + } + if s.exts == nil { + s.exts = map[extNumber]ast.SourcePos{} + } + _ = walk.Descriptors(f, func(d protoreflect.Descriptor) error { + pos := sourcePositionFor(d) + name := d.FullName() + _, isEnumValue := d.(protoreflect.EnumValueDescriptor) + s.symbols[name] = symbolEntry{pos: pos, isEnumValue: isEnumValue} + return nil + }) + + if s.files == nil { + s.files = map[protoreflect.FileDescriptor]struct{}{} + } + s.files[f] = struct{}{} +} + +func (s *Symbols) importResultWithExtensions(pkg *packageSymbols, r *result, handler *reporter.Handler) error { + imported, err := pkg.importResult(r, handler) + if err != nil { + return err + } + if !imported { + // nothing else to do + return nil + } + + return walk.Descriptors(r, func(d protoreflect.Descriptor) error { + fd, ok := d.(*extTypeDescriptor) + if !ok { + return nil + } + file := r.FileNode() + node := r.FieldNode(fd.FieldDescriptorProto()) + pos := file.NodeInfo(node.FieldTag()).Start() + extendee := fd.ContainingMessage() + if err := s.AddExtension(packageFor(extendee), extendee.FullName(), fd.Number(), pos, handler); err != nil { + return err + } + + return nil + }) +} + +func (s *Symbols) importResult(r *result, handler *reporter.Handler) error { + pkg, err := s.importPackages(packageNameStart(r), r.Package(), handler) + 
if err != nil || pkg == nil { + return err + } + _, err = pkg.importResult(r, handler) + return err +} + +func (s *packageSymbols) importResult(r *result, handler *reporter.Handler) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.files[r]; ok { + // already imported + return false, nil + } + + // first pass: check for conflicts + if err := s.checkResultLocked(r, handler); err != nil { + return false, err + } + if err := handler.Error(); err != nil { + return false, err + } + + // second pass: commit all symbols + s.commitResultLocked(r) + + return true, nil +} + +func (s *packageSymbols) checkResultLocked(r *result, handler *reporter.Handler) error { + resultSyms := map[protoreflect.FullName]symbolEntry{} + return walk.DescriptorProtos(r.FileDescriptorProto(), func(fqn protoreflect.FullName, d proto.Message) error { + _, isEnumVal := d.(*descriptorpb.EnumValueDescriptorProto) + file := r.FileNode() + node := r.Node(d) + pos := nameStart(file, node) + // check symbols already in this symbol table + if existing, ok := s.symbols[fqn]; ok { + if err := reportSymbolCollision(pos, fqn, isEnumVal, existing, handler); err != nil { + return err + } + } + + // also check symbols from this result (that are not yet in symbol table) + if existing, ok := resultSyms[fqn]; ok { + if err := reportSymbolCollision(pos, fqn, isEnumVal, existing, handler); err != nil { + return err + } + } + resultSyms[fqn] = symbolEntry{ + pos: pos, + isEnumValue: isEnumVal, + } + + return nil + }) +} + +func packageNameStart(r *result) ast.SourcePos { + if node, ok := r.FileNode().(*ast.FileNode); ok { + for _, decl := range node.Decls { + if pkgNode, ok := decl.(*ast.PackageNode); ok { + return r.FileNode().NodeInfo(pkgNode.Name).Start() + } + } + } + return ast.UnknownPos(r.Path()) +} + +func nameStart(file ast.FileDeclNode, n ast.Node) ast.SourcePos { + // TODO: maybe ast package needs a NamedNode interface to simplify this? 
+ switch n := n.(type) { + case ast.FieldDeclNode: + return file.NodeInfo(n.FieldName()).Start() + case ast.MessageDeclNode: + return file.NodeInfo(n.MessageName()).Start() + case ast.OneOfDeclNode: + return file.NodeInfo(n.OneOfName()).Start() + case ast.EnumValueDeclNode: + return file.NodeInfo(n.GetName()).Start() + case *ast.EnumNode: + return file.NodeInfo(n.Name).Start() + case *ast.ServiceNode: + return file.NodeInfo(n.Name).Start() + case ast.RPCDeclNode: + return file.NodeInfo(n.GetName()).Start() + default: + return file.NodeInfo(n).Start() + } +} + +func (s *packageSymbols) commitResultLocked(r *result) { + if s.symbols == nil { + s.symbols = map[protoreflect.FullName]symbolEntry{} + } + if s.exts == nil { + s.exts = map[extNumber]ast.SourcePos{} + } + _ = walk.DescriptorProtos(r.FileDescriptorProto(), func(fqn protoreflect.FullName, d proto.Message) error { + pos := nameStart(r.FileNode(), r.Node(d)) + _, isEnumValue := d.(*descriptorpb.EnumValueDescriptorProto) + s.symbols[fqn] = symbolEntry{pos: pos, isEnumValue: isEnumValue} + return nil + }) + + if s.files == nil { + s.files = map[protoreflect.FileDescriptor]struct{}{} + } + s.files[r] = struct{}{} +} + +func (s *Symbols) AddExtension(pkg, extendee protoreflect.FullName, tag protoreflect.FieldNumber, pos ast.SourcePos, handler *reporter.Handler) error { + if pkg != "" { + if !strings.HasPrefix(string(extendee), string(pkg)+".") { + return handler.HandleErrorf(pos, "could not register extension: extendee %q does not match package %q", extendee, pkg) + } + } + pkgSyms := s.getPackage(pkg) + if pkgSyms == nil { + // should never happen + return handler.HandleErrorf(pos, "could not register extension: missing package symbols for %q", pkg) + } + return pkgSyms.addExtension(extendee, tag, pos, handler) +} + +func (s *packageSymbols) addExtension(extendee protoreflect.FullName, tag protoreflect.FieldNumber, pos ast.SourcePos, handler *reporter.Handler) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.exts == nil { + s.exts = map[extNumber]ast.SourcePos{} + } + + extNum := extNumber{extendee: extendee, tag: tag} + if existing, ok := s.exts[extNum]; ok { + if err := handler.HandleErrorf(pos, "extension with tag %d for message %s already defined at %v", tag, extendee, existing); err != nil { + return err + } + } else { + s.exts[extNum] = pos + } + return nil +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/validate.go b/vendor/github.com/bufbuild/protocompile/linker/validate.go new file mode 100644 index 00000000..f30c5718 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/validate.go @@ -0,0 +1,283 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package linker + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/reporter" +) + +// ValidateOptions runs some validation checks on the result that can only +// be done after options are interpreted. +func (r *result) ValidateOptions(handler *reporter.Handler) error { + if err := r.validateExtensions(r, handler); err != nil { + return err + } + return r.validateJSONNamesInFile(handler) +} + +func (r *result) validateExtensions(d hasExtensionsAndMessages, handler *reporter.Handler) error { + for i := 0; i < d.Extensions().Len(); i++ { + if err := r.validateExtension(d.Extensions().Get(i), handler); err != nil { + return err + } + } + for i := 0; i < d.Messages().Len(); i++ { + if err := r.validateExtensions(d.Messages().Get(i), handler); err != nil { + return err + } + } + return nil +} + +func (r *result) validateExtension(fld protoreflect.FieldDescriptor, handler *reporter.Handler) error { + // NB: It's a little gross that we don't enforce these in validateBasic(). + // But it requires linking to resolve the extendee, so we can interrogate + // its descriptor. + if xtd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok { + fld = xtd.Descriptor() + } + fd := fld.(*fldDescriptor) //nolint:errcheck + if fld.ContainingMessage().Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() { + // Message set wire format requires that all extensions be messages + // themselves (no scalar extensions) + if fld.Kind() != protoreflect.MessageKind { + file := r.FileNode() + pos := file.NodeInfo(r.FieldNode(fd.proto).FieldType()).Start() + return handler.HandleErrorf(pos, "messages with message-set wire format cannot contain scalar extensions, only messages") + } + if fld.Cardinality() == protoreflect.Repeated { + file := r.FileNode() + pos := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()).Start() + return handler.HandleErrorf(pos, "messages with message-set wire format cannot contain repeated extensions, only optional") + } + } else if fld.Number() > internal.MaxNormalTag { + // In validateBasic() we just made sure these were within bounds for any message. But + // now that things are linked, we can check if the extendee is messageset wire format + // and, if not, enforce tighter limit. 
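+ // (for ordinary messages that tighter limit is the standard protobuf
+ // maximum field number, 536,870,911, i.e. 2^29-1)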
+ file := r.FileNode() + pos := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()).Start() + return handler.HandleErrorf(pos, "tag number %d is higher than max allowed tag number (%d)", fld.Number(), internal.MaxNormalTag) + } + + return nil +} + +func (r *result) validateJSONNamesInFile(handler *reporter.Handler) error { + for _, md := range r.FileDescriptorProto().GetMessageType() { + if err := r.validateJSONNamesInMessage(md, handler); err != nil { + return err + } + } + for _, ed := range r.FileDescriptorProto().GetEnumType() { + if err := r.validateJSONNamesInEnum(ed, handler); err != nil { + return err + } + } + return nil +} + +func (r *result) validateJSONNamesInMessage(md *descriptorpb.DescriptorProto, handler *reporter.Handler) error { + if err := r.validateFieldJSONNames(md, false, handler); err != nil { + return err + } + if err := r.validateFieldJSONNames(md, true, handler); err != nil { + return err + } + + for _, nmd := range md.GetNestedType() { + if err := r.validateJSONNamesInMessage(nmd, handler); err != nil { + return err + } + } + for _, ed := range md.GetEnumType() { + if err := r.validateJSONNamesInEnum(ed, handler); err != nil { + return err + } + } + return nil +} + +func (r *result) validateJSONNamesInEnum(ed *descriptorpb.EnumDescriptorProto, handler *reporter.Handler) error { + seen := map[string]*descriptorpb.EnumValueDescriptorProto{} + for _, evd := range ed.GetValue() { + scope := "enum value " + ed.GetName() + "." + evd.GetName() + + name := canonicalEnumValueName(evd.GetName(), ed.GetName()) + if existing, ok := seen[name]; ok && evd.GetNumber() != existing.GetNumber() { + fldNode := r.EnumValueNode(evd) + existingNode := r.EnumValueNode(existing) + conflictErr := fmt.Errorf("%s: camel-case name (with optional enum name prefix removed) %q conflicts with camel-case name of enum value %s, defined at %v", + scope, name, existing.GetName(), r.FileNode().NodeInfo(existingNode).Start()) + + // Since proto2 did not originally have a JSON format, we report conflicts as just warnings + if r.Syntax() != protoreflect.Proto3 { + handler.HandleWarningWithPos(r.FileNode().NodeInfo(fldNode).Start(), conflictErr) + } else if err := handler.HandleErrorf(r.FileNode().NodeInfo(fldNode).Start(), conflictErr.Error()); err != nil { + return err + } + } else { + seen[name] = evd + } + } + return nil +} + +func (r *result) validateFieldJSONNames(md *descriptorpb.DescriptorProto, useCustom bool, handler *reporter.Handler) error { + type jsonName struct { + source *descriptorpb.FieldDescriptorProto + // true if orig is a custom JSON name (vs. the field's default JSON name) + custom bool + } + seen := map[string]jsonName{} + + for _, fd := range md.GetField() { + scope := "field " + md.GetName() + "." + fd.GetName() + defaultName := internal.JSONName(fd.GetName()) + name := defaultName + custom := false + if useCustom { + n := fd.GetJsonName() + if n != defaultName || r.hasCustomJSONName(fd) { + name = n + custom = true + } + } + if existing, ok := seen[name]; ok { + // When useCustom is true, we'll only report an issue when a conflict is + // due to a custom name. That way, we don't double report conflicts on + // non-custom names. 
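Default JSON names are what make two distinct proto field names collide. A rough sketch of the lowerCamelCase derivation follows; internal.JSONName is the real implementation, and this toy version assumes ASCII snake_case input.

package main

import "fmt"

// jsonName mimics the default JSON-name rule: drop underscores and
// upper-case the letter that follows each one (illustrative sketch,
// assumes an ASCII lower-case letter follows each '_').
func jsonName(field string) string {
	out := make([]rune, 0, len(field))
	up := false
	for _, r := range field {
		switch {
		case r == '_':
			up = true
		case up:
			out = append(out, r-'a'+'A')
			up = false
		default:
			out = append(out, r)
		}
	}
	return string(out)
}

func main() {
	fmt.Println(jsonName("foo_bar")) // fooBar
	fmt.Println(jsonName("fooBar"))  // fooBar -> conflicts with foo_bar
}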
+ if !useCustom || custom || existing.custom {
+ fldNode := r.FieldNode(fd)
+ customStr, srcCustomStr := "custom", "custom"
+ if !custom {
+ customStr = "default"
+ }
+ if !existing.custom {
+ srcCustomStr = "default"
+ }
+ pos := r.FileNode().NodeInfo(fldNode).Start()
+ conflictErr := reporter.Errorf(pos, "%s: %s JSON name %q conflicts with %s JSON name of field %s, defined at %v",
+ scope, customStr, name, srcCustomStr, existing.source.GetName(), r.FileNode().NodeInfo(r.FieldNode(existing.source)).Start())
+
+ // Since proto2 did not originally have default JSON names, we report conflicts
+ // between default names (neither is a custom name) as just warnings.
+ if r.Syntax() != protoreflect.Proto3 && !custom && !existing.custom {
+ handler.HandleWarning(conflictErr)
+ } else if err := handler.HandleError(conflictErr); err != nil {
+ return err
+ }
+ }
+ } else {
+ seen[name] = jsonName{source: fd, custom: custom}
+ }
+ }
+ return nil
+}
+
+func (r *result) hasCustomJSONName(fdProto *descriptorpb.FieldDescriptorProto) bool {
+ // if we have the AST, we can more precisely determine if there was a custom
+ // JSON name defined, even if it is explicitly configured to be the same
+ // as the default JSON name for the field.
+ opts := r.FieldNode(fdProto).GetOptions()
+ if opts == nil {
+ return false
+ }
+ for _, opt := range opts.Options {
+ if len(opt.Name.Parts) == 1 &&
+ opt.Name.Parts[0].Name.AsIdentifier() == "json_name" &&
+ !opt.Name.Parts[0].IsExtension() {
+ return true
+ }
+ }
+ return false
+}
+
+func canonicalEnumValueName(enumValueName, enumName string) string {
+ return enumValCamelCase(removePrefix(enumValueName, enumName))
+}
+
+// removePrefix is used to remove the given prefix from the given str. It does not require
+// an exact match and ignores case and underscores. If all non-underscore characters
+// would be removed from str, str is returned unchanged. If str does not have the given
+// prefix (even with the very lenient matching, in regard to case and underscores), then
+// str is returned unchanged.
+//
+// The algorithm is adapted from the protoc source:
+//
+// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L922
+func removePrefix(str, prefix string) string {
+ j := 0
+ for i, r := range str {
+ if r == '_' {
+ // skip underscores in the input
+ continue
+ }
+
+ p, sz := utf8.DecodeRuneInString(prefix[j:])
+ for p == '_' {
+ j += sz // consume/skip underscore
+ p, sz = utf8.DecodeRuneInString(prefix[j:])
+ }
+
+ if j == len(prefix) {
+ // matched entire prefix; return rest of str
+ // but skipping any leading underscores
+ result := strings.TrimLeft(str[i:], "_")
+ if len(result) == 0 {
+ // result can't be empty string
+ return str
+ }
+ return result
+ }
+ if unicode.ToLower(r) != unicode.ToLower(p) {
+ // does not match prefix
+ return str
+ }
+ j += sz // consume matched rune of prefix
+ }
+ return str
+}
+
+// enumValCamelCase converts the given string to upper-camel-case.
+// +// The algorithm is adapted from the protoc source: +// +// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L887 +func enumValCamelCase(name string) string { + var js []rune + nextUpper := true + for _, r := range name { + if r == '_' { + nextUpper = true + continue + } + if nextUpper { + nextUpper = false + js = append(js, unicode.ToUpper(r)) + } else { + js = append(js, unicode.ToLower(r)) + } + } + return string(js) +} diff --git a/vendor/github.com/bufbuild/protocompile/options/options.go b/vendor/github.com/bufbuild/protocompile/options/options.go new file mode 100644 index 00000000..c26f136c --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/options/options.go @@ -0,0 +1,1611 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package options contains the logic for interpreting options. The parse step +// of compilation stores the options in uninterpreted form, which contains raw +// identifiers and literal values. +// +// The process of interpreting an option is to resolve identifiers, by examining +// descriptors for the google.protobuf.*Options types and their available +// extensions (custom options). As field names are resolved, the values can be +// type-checked against the types indicated in field descriptors. +// +// On success, the various fields and extensions of the options message are +// populated and the field holding the uninterpreted form is cleared. +package options + +import ( + "bytes" + "fmt" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/reporter" +) + +// Index is a mapping of AST nodes that define options to a corresponding path +// into the containing file descriptor. The path is a sequence of field tags +// and indexes that define a traversal path from the root (the file descriptor) +// to the resolved option field. 
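To make the path encoding concrete, here is a hedged illustration of the []int32 path for a deprecated option on the first field of a file's first message, using the standard descriptor.proto tag numbers (message_type=4, field=2, options=8, FieldOptions.deprecated=3).

package main

import "fmt"

func main() {
	// Path for: message M { optional int32 f = 1 [deprecated = true]; }
	// where M is the first message in the file:
	// FileDescriptorProto.message_type (tag 4), index 0,
	// DescriptorProto.field (tag 2), index 0,
	// FieldDescriptorProto.options (tag 8),
	// FieldOptions.deprecated (tag 3).
	path := []int32{4, 0, 2, 0, 8, 3}
	fmt.Println(path)
}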
+type Index map[*ast.OptionNode][]int32
+
+type interpreter struct {
+ file file
+ resolver linker.Resolver
+ container optionsContainer
+ lenient bool
+ reporter *reporter.Handler
+ index Index
+}
+
+type file interface {
+ parser.Result
+ ResolveEnumType(protoreflect.FullName) protoreflect.EnumDescriptor
+ ResolveMessageType(protoreflect.FullName) protoreflect.MessageDescriptor
+ ResolveOptionsType(protoreflect.FullName) protoreflect.MessageDescriptor
+ ResolveExtension(protoreflect.FullName) protoreflect.ExtensionTypeDescriptor
+ ResolveMessageLiteralExtensionName(ast.IdentValueNode) string
+}
+
+type noResolveFile struct {
+ parser.Result
+}
+
+func (n noResolveFile) ResolveEnumType(name protoreflect.FullName) protoreflect.EnumDescriptor {
+ return nil
+}
+
+func (n noResolveFile) ResolveMessageType(name protoreflect.FullName) protoreflect.MessageDescriptor {
+ return nil
+}
+
+func (n noResolveFile) ResolveOptionsType(name protoreflect.FullName) protoreflect.MessageDescriptor {
+ return nil
+}
+
+func (n noResolveFile) ResolveExtension(name protoreflect.FullName) protoreflect.ExtensionTypeDescriptor {
+ return nil
+}
+
+func (n noResolveFile) ResolveMessageLiteralExtensionName(ast.IdentValueNode) string {
+ return ""
+}
+
+// InterpretOptions interprets options in the given linked result, returning
+// an index that can be used to generate source code info. This step mutates
+// the linked result's underlying proto to move option elements out of the
+// "uninterpreted_option" fields and into proper option fields and extensions.
+//
+// The given handler is used to report errors and warnings. If any errors are
+// reported, this function returns a non-nil error.
+func InterpretOptions(linked linker.Result, handler *reporter.Handler) (Index, error) {
+ return interpretOptions(false, linked, handler)
+}
+
+// InterpretOptionsLenient interprets options in a lenient/best-effort way in
+// the given linked result, returning an index that can be used to generate
+// source code info. This step mutates the linked result's underlying proto to
+// move option elements out of the "uninterpreted_option" fields and into proper
+// option fields and extensions.
+//
+// In lenient mode, errors resolving option names and type errors are ignored.
+// Any options that are uninterpretable (due to such errors) will remain in the
+// "uninterpreted_option" fields.
+func InterpretOptionsLenient(linked linker.Result) (Index, error) {
+ return interpretOptions(true, linked, reporter.NewHandler(nil))
+}
+
+// InterpretUnlinkedOptions does a best-effort attempt to interpret options in
+// the given parsed result, returning an index that can be used to generate
+// source code info. This step mutates the parsed result's underlying proto to
+// move option elements out of the "uninterpreted_option" fields and into proper
+// option fields and extensions.
+//
+// This is the same as InterpretOptionsLenient except that it accepts an
+// unlinked result. Because the file is unlinked, custom options cannot be
+// interpreted. Other errors resolving option names or type errors will be
+// effectively ignored. Any options that are uninterpretable (due to such
+// errors) will remain in the "uninterpreted_option" fields.
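A small usage sketch of the strict entry point above; it assumes a linker.Result named linked was produced by an earlier parse/link step not shown here, and interpretStrict is a hypothetical helper.

package example

import (
	"github.com/bufbuild/protocompile/linker"
	"github.com/bufbuild/protocompile/options"
	"github.com/bufbuild/protocompile/reporter"
)

// interpretStrict runs strict option interpretation on an already-linked
// file and returns the index used for source code info generation.
func interpretStrict(linked linker.Result) (options.Index, error) {
	handler := reporter.NewHandler(nil) // default behavior: stop on first error
	return options.InterpretOptions(linked, handler)
}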
+func InterpretUnlinkedOptions(parsed parser.Result) (Index, error) { + return interpretOptions(true, noResolveFile{parsed}, reporter.NewHandler(nil)) +} + +func interpretOptions(lenient bool, file file, handler *reporter.Handler) (Index, error) { + interp := interpreter{ + file: file, + lenient: lenient, + reporter: handler, + index: Index{}, + } + interp.container, _ = file.(optionsContainer) + if f, ok := file.(linker.File); ok { + interp.resolver = linker.ResolverFromFile(f) + } + + fd := file.FileDescriptorProto() + prefix := fd.GetPackage() + if prefix != "" { + prefix += "." + } + opts := fd.GetOptions() + if opts != nil { + if len(opts.UninterpretedOption) > 0 { + remain, err := interp.interpretOptions(fd.GetName(), fd, opts, opts.UninterpretedOption) + if err != nil { + return nil, err + } + opts.UninterpretedOption = remain + } + } + for _, md := range fd.GetMessageType() { + fqn := prefix + md.GetName() + if err := interp.interpretMessageOptions(fqn, md); err != nil { + return nil, err + } + } + for _, fld := range fd.GetExtension() { + fqn := prefix + fld.GetName() + if err := interp.interpretFieldOptions(fqn, fld); err != nil { + return nil, err + } + } + for _, ed := range fd.GetEnumType() { + fqn := prefix + ed.GetName() + if err := interp.interpretEnumOptions(fqn, ed); err != nil { + return nil, err + } + } + for _, sd := range fd.GetService() { + fqn := prefix + sd.GetName() + opts := sd.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(fqn, sd, opts, opts.UninterpretedOption) + if err != nil { + return nil, err + } + opts.UninterpretedOption = remain + } + for _, mtd := range sd.GetMethod() { + mtdFqn := fqn + "." + mtd.GetName() + opts := mtd.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(mtdFqn, mtd, opts, opts.UninterpretedOption) + if err != nil { + return nil, err + } + opts.UninterpretedOption = remain + } + } + } + return interp.index, nil +} + +func (interp *interpreter) nodeInfo(n ast.Node) ast.NodeInfo { + return interp.file.FileNode().NodeInfo(n) +} + +func (interp *interpreter) interpretMessageOptions(fqn string, md *descriptorpb.DescriptorProto) error { + opts := md.GetOptions() + if opts != nil { + if len(opts.UninterpretedOption) > 0 { + remain, err := interp.interpretOptions(fqn, md, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain + } + } + for _, fld := range md.GetField() { + fldFqn := fqn + "." + fld.GetName() + if err := interp.interpretFieldOptions(fldFqn, fld); err != nil { + return err + } + } + for _, ood := range md.GetOneofDecl() { + oodFqn := fqn + "." + ood.GetName() + opts := ood.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(oodFqn, ood, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain + } + } + for _, fld := range md.GetExtension() { + fldFqn := fqn + "." + fld.GetName() + if err := interp.interpretFieldOptions(fldFqn, fld); err != nil { + return err + } + } + for _, er := range md.GetExtensionRange() { + erFqn := fmt.Sprintf("%s.%d-%d", fqn, er.GetStart(), er.GetEnd()) + opts := er.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(erFqn, er, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain + } + } + for _, nmd := range md.GetNestedType() { + nmdFqn := fqn + "." 
+ nmd.GetName() + if err := interp.interpretMessageOptions(nmdFqn, nmd); err != nil { + return err + } + } + for _, ed := range md.GetEnumType() { + edFqn := fqn + "." + ed.GetName() + if err := interp.interpretEnumOptions(edFqn, ed); err != nil { + return err + } + } + return nil +} + +func (interp *interpreter) interpretFieldOptions(fqn string, fld *descriptorpb.FieldDescriptorProto) error { + opts := fld.GetOptions() + if len(opts.GetUninterpretedOption()) == 0 { + return nil + } + uo := opts.UninterpretedOption + scope := fmt.Sprintf("field %s", fqn) + + // process json_name pseudo-option + index, err := internal.FindOption(interp.file, interp.reporter, scope, uo, "json_name") + if err != nil && !interp.lenient { + return err + } + if index >= 0 { + opt := uo[index] + optNode := interp.file.OptionNode(opt) + if fld.GetExtendee() != "" { + return interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetName()).Start(), "%s: option json_name is not allowed on extensions", scope) + } + // attribute source code info + if on, ok := optNode.(*ast.OptionNode); ok { + interp.index[on] = []int32{-1, internal.FieldJSONNameTag} + } + uo = internal.RemoveOption(uo, index) + if opt.StringValue == nil { + return interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetValue()).Start(), "%s: expecting string value for json_name option", scope) + } + name := string(opt.StringValue) + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + return interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetValue()).Start(), "%s: option json_name value cannot start with '[' and end with ']'; that is reserved for representing extensions", scope) + } + fld.JsonName = proto.String(name) + } + + // and process default pseudo-option + if index, err := interp.processDefaultOption(scope, fqn, fld, uo); err != nil && !interp.lenient { + return err + } else if index >= 0 { + // attribute source code info + optNode := interp.file.OptionNode(uo[index]) + if on, ok := optNode.(*ast.OptionNode); ok { + interp.index[on] = []int32{-1, internal.FieldDefaultTag} + } + uo = internal.RemoveOption(uo, index) + } + + if len(uo) == 0 { + // no real options, only pseudo-options above? 
clear out options + fld.Options = nil + } else if remain, err := interp.interpretOptions(fqn, fld, opts, uo); err != nil { + return err + } else { + opts.UninterpretedOption = remain + } + return nil +} + +func (interp *interpreter) processDefaultOption(scope string, fqn string, fld *descriptorpb.FieldDescriptorProto, uos []*descriptorpb.UninterpretedOption) (defaultIndex int, err error) { + found, err := internal.FindOption(interp.file, interp.reporter, scope, uos, "default") + if err != nil || found == -1 { + return -1, err + } + opt := uos[found] + optNode := interp.file.OptionNode(opt) + if fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return -1, interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetName()).Start(), "%s: default value cannot be set because field is repeated", scope) + } + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP || fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE { + return -1, interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetName()).Start(), "%s: default value cannot be set because field is a message", scope) + } + val := optNode.GetValue() + if _, ok := val.(*ast.MessageLiteralNode); ok { + return -1, interp.reporter.HandleErrorf(interp.nodeInfo(val).Start(), "%s: default value cannot be a message", scope) + } + mc := &internal.MessageContext{ + File: interp.file, + ElementName: fqn, + ElementType: descriptorType(fld), + Option: opt, + } + var v interface{} + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { + ed := interp.file.ResolveEnumType(protoreflect.FullName(fld.GetTypeName())) + _, name, err := interp.enumFieldValue(mc, ed, val, false) + if err != nil { + return -1, interp.reporter.HandleError(err) + } + v = string(name) + } else { + v, err = interp.scalarFieldValue(mc, fld.GetType(), val, false) + if err != nil { + return -1, interp.reporter.HandleError(err) + } + } + if str, ok := v.(string); ok { + fld.DefaultValue = proto.String(str) + } else if b, ok := v.([]byte); ok { + fld.DefaultValue = proto.String(encodeDefaultBytes(b)) + } else { + var flt float64 + var ok bool + if flt, ok = v.(float64); !ok { + var flt32 float32 + if flt32, ok = v.(float32); ok { + flt = float64(flt32) + } + } + if ok { + switch { + case math.IsInf(flt, 1): + fld.DefaultValue = proto.String("inf") + case math.IsInf(flt, -1): + fld.DefaultValue = proto.String("-inf") + case math.IsNaN(flt): + fld.DefaultValue = proto.String("nan") + default: + fld.DefaultValue = proto.String(fmt.Sprintf("%v", v)) + } + } else { + fld.DefaultValue = proto.String(fmt.Sprintf("%v", v)) + } + } + return found, nil +} + +func encodeDefaultBytes(b []byte) string { + var buf bytes.Buffer + internal.WriteEscapedBytes(&buf, b) + return buf.String() +} + +func (interp *interpreter) interpretEnumOptions(fqn string, ed *descriptorpb.EnumDescriptorProto) error { + opts := ed.GetOptions() + if opts != nil { + if len(opts.UninterpretedOption) > 0 { + remain, err := interp.interpretOptions(fqn, ed, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain + } + } + for _, evd := range ed.GetValue() { + evdFqn := fqn + "." + evd.GetName() + opts := evd.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(evdFqn, evd, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain + } + } + return nil +} + +// interpretedOption represents the result of interpreting an option. 
+// This includes metadata that allows the option to be serialized to +// bytes in a way that is deterministic and can preserve the structure +// of the source (the way the options are de-structured and the order in +// which options appear). +type interpretedOption struct { + unknown bool + pathPrefix []int32 + interpretedField +} + +func (o *interpretedOption) path() []int32 { + path := o.pathPrefix + path = append(path, o.number) + if o.repeated { + path = append(path, o.index) + } + return path +} + +func (o *interpretedOption) appendOptionBytes(b []byte) ([]byte, error) { + return o.appendOptionBytesWithPath(b, o.pathPrefix) +} + +func (o *interpretedOption) appendOptionBytesWithPath(b []byte, path []int32) ([]byte, error) { + if len(path) == 0 { + return appendOptionBytesSingle(b, &o.interpretedField) + } + // NB: if we add functions to compute sizes of the options first, we could + // allocate precisely sized slice up front, which would be more efficient than + // repeated creation/growing/concatenation. + enclosed, err := o.appendOptionBytesWithPath(nil, path[1:]) + if err != nil { + return nil, err + } + b = protowire.AppendTag(b, protowire.Number(path[0]), protowire.BytesType) + return protowire.AppendBytes(b, enclosed), nil +} + +// interpretedField represents a field in an options message that is the +// result of interpreting an option. This is used for the option value +// itself as well as for subfields when an option value is a message +// literal. +type interpretedField struct { + // field number + number int32 + // index of this element inside a repeated field; only set if repeated == true + index int32 + // true if this is a repeated field + repeated bool + // true if this is a repeated field that stores scalar values in packed form + packed bool + // the field's kind + kind protoreflect.Kind + + value interpretedFieldValue +} + +// interpretedFieldValue is a wrapper around protoreflect.Value that +// includes extra metadata. 
+type interpretedFieldValue struct { + // the field value + val protoreflect.Value + // if true, this value is a list of values, not a singular value + isList bool + // non-nil for singular message values + msgVal []*interpretedField + // non-nil for non-empty lists of message values + msgListVal [][]*interpretedField +} + +func appendOptionBytes(b []byte, flds []*interpretedField) ([]byte, error) { + // protoc emits messages sorted by field number + if len(flds) > 1 { + sort.SliceStable(flds, func(i, j int) bool { + return flds[i].number < flds[j].number + }) + } + + for i := 0; i < len(flds); i++ { + f := flds[i] + switch { + case f.packed && canPack(f.kind): + // for packed repeated numeric fields, all runs of values are merged into one packed list + num := f.number + j := i + for j < len(flds) && flds[j].number == num { + j++ + } + // now flds[i:j] is the range of contiguous fields for the same field number + enclosed, err := appendOptionBytesPacked(nil, f.kind, flds[i:j]) + if err != nil { + return nil, err + } + b = protowire.AppendTag(b, protowire.Number(f.number), protowire.BytesType) + b = protowire.AppendBytes(b, enclosed) + // skip over the other subsequent fields we just serialized + i = j - 1 + case f.value.isList: + // if not packed, then emit one value at a time + single := *f + single.value.isList = false + single.value.msgListVal = nil + l := f.value.val.List() + for i := 0; i < l.Len(); i++ { + single.value.val = l.Get(i) + if f.kind == protoreflect.MessageKind || f.kind == protoreflect.GroupKind { + single.value.msgVal = f.value.msgListVal[i] + } + var err error + b, err = appendOptionBytesSingle(b, &single) + if err != nil { + return nil, err + } + } + default: + // simple singular value + var err error + b, err = appendOptionBytesSingle(b, f) + if err != nil { + return nil, err + } + } + } + + return b, nil +} + +func canPack(k protoreflect.Kind) bool { + switch k { + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.StringKind, protoreflect.BytesKind: + return false + default: + return true + } +} + +func appendOptionBytesPacked(b []byte, k protoreflect.Kind, flds []*interpretedField) ([]byte, error) { + for i := range flds { + val := flds[i].value + if val.isList { + l := val.val.List() + var err error + b, err = appendNumericValueBytesPacked(b, k, l) + if err != nil { + return nil, err + } + } else { + var err error + b, err = appendNumericValueBytes(b, k, val.val) + if err != nil { + return nil, err + } + } + } + return b, nil +} + +func appendOptionBytesSingle(b []byte, f *interpretedField) ([]byte, error) { + num := protowire.Number(f.number) + switch f.kind { + case protoreflect.MessageKind: + enclosed, err := appendOptionBytes(nil, f.value.msgVal) + if err != nil { + return nil, err + } + b = protowire.AppendTag(b, num, protowire.BytesType) + return protowire.AppendBytes(b, enclosed), nil + + case protoreflect.GroupKind: + b = protowire.AppendTag(b, num, protowire.StartGroupType) + var err error + b, err = appendOptionBytes(b, f.value.msgVal) + if err != nil { + return nil, err + } + return protowire.AppendTag(b, num, protowire.EndGroupType), nil + + case protoreflect.StringKind: + b = protowire.AppendTag(b, num, protowire.BytesType) + return protowire.AppendString(b, f.value.val.String()), nil + + case protoreflect.BytesKind: + b = protowire.AppendTag(b, num, protowire.BytesType) + return protowire.AppendBytes(b, f.value.val.Bytes()), nil + + case protoreflect.Int32Kind, protoreflect.Int64Kind, protoreflect.Uint32Kind, 
protoreflect.Uint64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, protoreflect.EnumKind, protoreflect.BoolKind: + b = protowire.AppendTag(b, num, protowire.VarintType) + return appendNumericValueBytes(b, f.kind, f.value.val) + + case protoreflect.Fixed32Kind, protoreflect.Sfixed32Kind, protoreflect.FloatKind: + b = protowire.AppendTag(b, num, protowire.Fixed32Type) + return appendNumericValueBytes(b, f.kind, f.value.val) + + case protoreflect.Fixed64Kind, protoreflect.Sfixed64Kind, protoreflect.DoubleKind: + b = protowire.AppendTag(b, num, protowire.Fixed64Type) + return appendNumericValueBytes(b, f.kind, f.value.val) + + default: + return nil, fmt.Errorf("unknown field kind: %v", f.kind) + } +} + +func appendNumericValueBytesPacked(b []byte, k protoreflect.Kind, l protoreflect.List) ([]byte, error) { + for i := 0; i < l.Len(); i++ { + var err error + b, err = appendNumericValueBytes(b, k, l.Get(i)) + if err != nil { + return nil, err + } + } + return b, nil +} + +func appendNumericValueBytes(b []byte, k protoreflect.Kind, v protoreflect.Value) ([]byte, error) { + switch k { + case protoreflect.Int32Kind, protoreflect.Int64Kind: + return protowire.AppendVarint(b, uint64(v.Int())), nil + case protoreflect.Uint32Kind, protoreflect.Uint64Kind: + return protowire.AppendVarint(b, v.Uint()), nil + case protoreflect.Sint32Kind, protoreflect.Sint64Kind: + return protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())), nil + case protoreflect.Fixed32Kind: + return protowire.AppendFixed32(b, uint32(v.Uint())), nil + case protoreflect.Fixed64Kind: + return protowire.AppendFixed64(b, v.Uint()), nil + case protoreflect.Sfixed32Kind: + return protowire.AppendFixed32(b, uint32(v.Int())), nil + case protoreflect.Sfixed64Kind: + return protowire.AppendFixed64(b, uint64(v.Int())), nil + case protoreflect.FloatKind: + return protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))), nil + case protoreflect.DoubleKind: + return protowire.AppendFixed64(b, math.Float64bits(v.Float())), nil + case protoreflect.BoolKind: + return protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())), nil + case protoreflect.EnumKind: + return protowire.AppendVarint(b, uint64(v.Enum())), nil + default: + return nil, fmt.Errorf("unknown field kind: %v", k) + } +} + +// optionsContainer may be optionally implemented by a linker.Result. It is +// not part of the linker.Result interface as it is meant only for internal use. +// This allows the option interpreter step to store extra metadata about the +// serialized structure of options. +type optionsContainer interface { + // AddOptionBytes adds the given pre-serialized option bytes to a file, + // associated with the given options message. The type of the given message + // should be an options message, for example *descriptorpb.MessageOptions. + // This value should be part of the message hierarchy whose root is the + // *descriptorpb.FileDescriptorProto that corresponds to this result. + AddOptionBytes(pm proto.Message, opts []byte) +} + +// interpretOptions processes the options in uninterpreted, which are interpreted as fields +// of the given opts message. On success, it will usually return nil, nil. But if the current +// operation is lenient, it may return a non-nil slice of uninterpreted options on success. +// In such a case, the returned value is the remaining slice of options which could not be +// interpreted. 
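Before moving on to interpretation itself: the sint kinds in appendNumericValueBytes above go through zigzag encoding so that small negative values stay small on the wire. A self-contained sketch of the same mapping that protowire.EncodeZigZag performs:

package main

import "fmt"

// zigZag maps signed ints to unsigned so that values near zero, of
// either sign, encode to short varints: 0,-1,1,-2,2 -> 0,1,2,3,4.
func zigZag(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%d -> %d\n", v, zigZag(v))
	}
}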
+func (interp *interpreter) interpretOptions(fqn string, element, opts proto.Message, uninterpreted []*descriptorpb.UninterpretedOption) ([]*descriptorpb.UninterpretedOption, error) { + optsDesc := opts.ProtoReflect().Descriptor() + optsFqn := string(optsDesc.FullName()) + var msg protoreflect.Message + // see if the parse included an override copy for these options + if md := interp.file.ResolveOptionsType(protoreflect.FullName(optsFqn)); md != nil { + dm := dynamicpb.NewMessage(md) + if err := cloneInto(dm, opts, nil); err != nil { + node := interp.file.Node(element) + return nil, interp.reporter.HandleError(reporter.Error(interp.nodeInfo(node).Start(), err)) + } + msg = dm + } else { + msg = proto.Clone(opts).ProtoReflect() + } + + mc := &internal.MessageContext{ + File: interp.file, + ElementName: fqn, + ElementType: descriptorType(element), + } + var remain []*descriptorpb.UninterpretedOption + results := make([]*interpretedOption, 0, len(uninterpreted)) + for _, uo := range uninterpreted { + node := interp.file.OptionNode(uo) + if !uo.Name[0].GetIsExtension() && uo.Name[0].GetNamePart() == "uninterpreted_option" { + if interp.lenient { + remain = append(remain, uo) + continue + } + // uninterpreted_option might be found reflectively, but is not actually valid for use + if err := interp.reporter.HandleErrorf(interp.nodeInfo(node.GetName()).Start(), "%vinvalid option 'uninterpreted_option'", mc); err != nil { + return nil, err + } + } + mc.Option = uo + res, err := interp.interpretField(mc, msg, uo, 0, nil) + if err != nil { + if interp.lenient { + remain = append(remain, uo) + continue + } + return nil, err + } + res.unknown = !isKnownField(optsDesc, res) + results = append(results, res) + if optn, ok := node.(*ast.OptionNode); ok { + interp.index[optn] = res.path() + } + } + + if interp.lenient { + // If we're lenient, then we don't want to clobber the passed in message + // and leave it partially populated. So we convert into a copy first + optsClone := opts.ProtoReflect().New().Interface() + if err := cloneInto(optsClone, msg.Interface(), interp.resolver); err != nil { + // TODO: do this in a more granular way, so we can convert individual + // fields and leave bad ones uninterpreted instead of skipping all of + // the work we've done so far. 
+ return uninterpreted, nil + } + // conversion from dynamic message above worked, so now + // it is safe to overwrite the passed in message + proto.Reset(opts) + proto.Merge(opts, optsClone) + + if interp.container != nil { + b, err := interp.toOptionBytes(mc, results) + if err != nil { + return nil, err + } + interp.container.AddOptionBytes(opts, b) + } + + return remain, nil + } + + if err := validateRecursive(msg, ""); err != nil { + node := interp.file.Node(element) + if err := interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), "error in %s options: %v", descriptorType(element), err); err != nil { + return nil, err + } + } + + // now try to convert into the passed in message and fail if not successful + if err := cloneInto(opts, msg.Interface(), interp.resolver); err != nil { + node := interp.file.Node(element) + return nil, interp.reporter.HandleError(reporter.Error(interp.nodeInfo(node).Start(), err)) + } + if interp.container != nil { + b, err := interp.toOptionBytes(mc, results) + if err != nil { + return nil, err + } + interp.container.AddOptionBytes(opts, b) + } + + return nil, nil +} + +// isKnownField returns true if the given option is for a known field of the +// given options message descriptor and will be serialized using the expected +// wire type for that known field. +func isKnownField(desc protoreflect.MessageDescriptor, opt *interpretedOption) bool { + var num int32 + if len(opt.pathPrefix) > 0 { + num = opt.pathPrefix[0] + } else { + num = opt.number + } + fd := desc.Fields().ByNumber(protoreflect.FieldNumber(num)) + if fd == nil { + return false + } + + // Before the full wire type check, we do a quick check that will usually pass + // and allow us to short-circuit the logic below. + if fd.IsList() == opt.repeated && fd.Kind() == opt.kind { + return true + } + + // We figure out the wire type this interpreted field will use when serialized. + var wireType protowire.Type + switch { + case len(opt.pathPrefix) > 0: + // If path prefix exists, this field is nested inside a message. + // And messages use bytes wire type. + wireType = protowire.BytesType + case opt.repeated && opt.packed && canPack(opt.kind): + // Packed repeated numeric scalars use bytes wire type. + wireType = protowire.BytesType + default: + wireType = wireTypeForKind(opt.kind) + } + + // And then we see if the wire type we just determined is compatible with + // the field descriptor we found. + if fd.IsList() && canPack(fd.Kind()) && wireType == protowire.BytesType { + // Even if fd.IsPacked() is false, bytes type is still accepted for + // repeated scalar numerics, so that changing a repeated field from + // packed to not-packed (or vice versa) is a compatible change. 
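The compatibility rule in the comment above (packed to not-packed is a compatible change) is easiest to see in raw bytes. This sketch encodes the same repeated varint values both unpacked (one tagged record per element) and packed (one length-delimited record), using protowire directly:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const num = protowire.Number(1)
	vals := []uint64{3, 270, 86942}

	// Unpacked: one varint record per element.
	var unpacked []byte
	for _, v := range vals {
		unpacked = protowire.AppendTag(unpacked, num, protowire.VarintType)
		unpacked = protowire.AppendVarint(unpacked, v)
	}

	// Packed: a single length-delimited record holding all the varints.
	var payload []byte
	for _, v := range vals {
		payload = protowire.AppendVarint(payload, v)
	}
	var packed []byte
	packed = protowire.AppendTag(packed, num, protowire.BytesType)
	packed = protowire.AppendBytes(packed, payload)

	fmt.Printf("unpacked: %x\npacked:   %x\n", unpacked, packed)
}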
+ return true + } + return wireType == wireTypeForKind(fd.Kind()) +} + +func wireTypeForKind(kind protoreflect.Kind) protowire.Type { + switch kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: + return protowire.BytesType + case protoreflect.GroupKind: + return protowire.StartGroupType + case protoreflect.Fixed32Kind, protoreflect.Sfixed32Kind, protoreflect.FloatKind: + return protowire.Fixed32Type + case protoreflect.Fixed64Kind, protoreflect.Sfixed64Kind, protoreflect.DoubleKind: + return protowire.Fixed64Type + default: + // everything else uses varint + return protowire.VarintType + } +} + +func cloneInto(dest proto.Message, src proto.Message, res linker.Resolver) error { + if dest.ProtoReflect().Descriptor() == src.ProtoReflect().Descriptor() { + proto.Reset(dest) + proto.Merge(dest, src) + if err := proto.CheckInitialized(dest); err != nil { + return err + } + return nil + } + + // If descriptors are not the same, we could have field descriptors in src that + // don't match the ones in dest. There's no easy/sane way to handle that. So we + // just marshal to bytes and back to do this + data, err := proto.Marshal(src) + if err != nil { + return err + } + return proto.UnmarshalOptions{Resolver: res}.Unmarshal(data, dest) +} + +func (interp *interpreter) toOptionBytes(mc *internal.MessageContext, results []*interpretedOption) ([]byte, error) { + // protoc emits non-custom options in tag order and then + // the rest are emitted in the order they are defined in source + sort.SliceStable(results, func(i, j int) bool { + if !results[i].unknown && results[j].unknown { + return true + } + if !results[i].unknown && !results[j].unknown { + return results[i].number < results[j].number + } + return false + }) + var b []byte + for _, res := range results { + var err error + b, err = res.appendOptionBytes(b) + if err != nil { + if _, ok := err.(reporter.ErrorWithPos); !ok { + pos := ast.SourcePos{Filename: interp.file.AST().Name()} + err = reporter.Errorf(pos, "%sfailed to encode options: %w", mc, err) + } + if err := interp.reporter.HandleError(err); err != nil { + return nil, err + } + } + } + return b, nil +} + +func validateRecursive(msg protoreflect.Message, prefix string) error { + flds := msg.Descriptor().Fields() + var missingFields []string + for i := 0; i < flds.Len(); i++ { + fld := flds.Get(i) + if fld.Cardinality() == protoreflect.Required && !msg.Has(fld) { + missingFields = append(missingFields, fmt.Sprintf("%s%s", prefix, fld.Name())) + } + } + if len(missingFields) > 0 { + return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", ")) + } + + var err error + msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { + if fld.IsMap() { + md := fld.MapValue().Message() + if md != nil { + val.Map().Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + chprefix := fmt.Sprintf("%s%s[%v].", prefix, fieldName(fld), k) + err = validateRecursive(v.Message(), chprefix) + return err == nil + }) + if err != nil { + return false + } + } + } else { + md := fld.Message() + if md != nil { + if fld.IsList() { + sl := val.List() + for i := 0; i < sl.Len(); i++ { + v := sl.Get(i) + chprefix := fmt.Sprintf("%s%s[%d].", prefix, fieldName(fld), i) + err = validateRecursive(v.Message(), chprefix) + if err != nil { + return false + } + } + } else { + chprefix := fmt.Sprintf("%s%s.", prefix, fieldName(fld)) + err = validateRecursive(val.Message(), chprefix) + if err != nil { + return false + } + } + } + } + 
return true + }) + return err +} + +// interpretField interprets the option described by opt, as a field inside the given msg. This +// interprets components of the option name starting at nameIndex. When nameIndex == 0, then +// msg must be an options message. For nameIndex > 0, msg is a nested message inside of the +// options message. The given pathPrefix is the path (sequence of field numbers and indices +// with a FileDescriptorProto as the start) up to but not including the given nameIndex. +func (interp *interpreter) interpretField(mc *internal.MessageContext, msg protoreflect.Message, opt *descriptorpb.UninterpretedOption, nameIndex int, pathPrefix []int32) (*interpretedOption, error) { + var fld protoreflect.FieldDescriptor + nm := opt.GetName()[nameIndex] + node := interp.file.OptionNamePartNode(nm) + if nm.GetIsExtension() { + extName := nm.GetNamePart() + if extName[0] == '.' { + extName = extName[1:] /* skip leading dot */ + } + fld = interp.file.ResolveExtension(protoreflect.FullName(extName)) + if fld == nil { + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), + "%vunrecognized extension %s of %s", + mc, extName, msg.Descriptor().FullName()) + } + if fld.ContainingMessage().FullName() != msg.Descriptor().FullName() { + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), + "%vextension %s should extend %s but instead extends %s", + mc, extName, msg.Descriptor().FullName(), fld.ContainingMessage().FullName()) + } + } else { + fld = msg.Descriptor().Fields().ByName(protoreflect.Name(nm.GetNamePart())) + if fld == nil { + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), + "%vfield %s of %s does not exist", + mc, nm.GetNamePart(), msg.Descriptor().FullName()) + } + } + + if len(opt.GetName()) > nameIndex+1 { + nextnm := opt.GetName()[nameIndex+1] + nextnode := interp.file.OptionNamePartNode(nextnm) + k := fld.Kind() + if k != protoreflect.MessageKind && k != protoreflect.GroupKind { + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(nextnode).Start(), + "%vcannot set field %s because %s is not a message", + mc, nextnm.GetNamePart(), nm.GetNamePart()) + } + if fld.Cardinality() == protoreflect.Repeated { + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(nextnode).Start(), + "%vcannot set field %s because %s is repeated (must use an aggregate)", + mc, nextnm.GetNamePart(), nm.GetNamePart()) + } + var fdm protoreflect.Message + if msg.Has(fld) { + v := msg.Mutable(fld) + fdm = v.Message() + } else { + if ood := fld.ContainingOneof(); ood != nil { + existingFld := msg.WhichOneof(ood) + if existingFld != nil && existingFld.Number() != fld.Number() { + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), + "%voneof %q already has field %q set", + mc, ood.Name(), fieldName(existingFld)) + } + } + fdm = dynamicpb.NewMessage(fld.Message()) + msg.Set(fld, protoreflect.ValueOfMessage(fdm)) + } + // recurse to set next part of name + return interp.interpretField(mc, fdm, opt, nameIndex+1, append(pathPrefix, int32(fld.Number()))) + } + + optNode := interp.file.OptionNode(opt) + val, err := interp.setOptionField(mc, msg, fld, node, optNode.GetValue(), false) + if err != nil { + return nil, interp.reporter.HandleError(err) + } + var index int32 + if fld.IsMap() { + index = int32(msg.Get(fld).Map().Len()) - 1 + } else if fld.IsList() { + index = int32(msg.Get(fld).List().Len()) - 1 + } + return &interpretedOption{ + pathPrefix: pathPrefix, + interpretedField: interpretedField{ + number: 
int32(fld.Number()),
+ index: index,
+ kind: fld.Kind(),
+ repeated: fld.Cardinality() == protoreflect.Repeated,
+ value: val,
+ // NB: don't set packed here in a top-level option
+ // (only values in message literals will be serialized
+ // in packed format)
+ },
+ }, nil
+}
+
+// setOptionField sets the value for field fld in the given message msg to the value represented
+// by val. The given name is the AST node that corresponds to the name of fld. On success, it
+// returns additional metadata about the field that was set.
+func (interp *interpreter) setOptionField(mc *internal.MessageContext, msg protoreflect.Message, fld protoreflect.FieldDescriptor, name ast.Node, val ast.ValueNode, insideMsgLiteral bool) (interpretedFieldValue, error) {
+ v := val.Value()
+ if sl, ok := v.([]ast.ValueNode); ok {
+ // handle slices a little differently than the others
+ if fld.Cardinality() != protoreflect.Repeated {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue is an array but field is not repeated", mc)
+ }
+ origPath := mc.OptAggPath
+ defer func() {
+ mc.OptAggPath = origPath
+ }()
+ var resVal listValue
+ var resMsgVals [][]*interpretedField
+ for index, item := range sl {
+ mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, index)
+ value, err := interp.fieldValue(mc, fld, item, insideMsgLiteral)
+ if err != nil {
+ return interpretedFieldValue{}, err
+ }
+ if fld.IsMap() {
+ setMapEntry(msg, fld, &value)
+ } else {
+ msg.Mutable(fld).List().Append(value.val)
+ }
+ resVal = append(resVal, value.val)
+ if value.msgVal != nil {
+ resMsgVals = append(resMsgVals, value.msgVal)
+ }
+ }
+ return interpretedFieldValue{
+ isList: true,
+ val: protoreflect.ValueOfList(&resVal),
+ msgListVal: resMsgVals,
+ }, nil
+ }
+
+ value, err := interp.fieldValue(mc, fld, val, insideMsgLiteral)
+ if err != nil {
+ return interpretedFieldValue{}, err
+ }
+
+ if ood := fld.ContainingOneof(); ood != nil {
+ existingFld := msg.WhichOneof(ood)
+ if existingFld != nil && existingFld.Number() != fld.Number() {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(name).Start(), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld))
+ }
+ }
+
+ switch {
+ case fld.IsMap():
+ setMapEntry(msg, fld, &value)
+ case fld.IsList():
+ msg.Mutable(fld).List().Append(value.val)
+ default:
+ if msg.Has(fld) {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(name).Start(), "%vnon-repeated option field %s already set", mc, fieldName(fld))
+ }
+ msg.Set(fld, value.val)
+ }
+ return value, nil
+}
+
+func setMapEntry(msg protoreflect.Message, fld protoreflect.FieldDescriptor, value *interpretedFieldValue) {
+ entry := value.val.Message()
+ keyFld, valFld := fld.MapKey(), fld.MapValue()
+ // if an entry is missing a key or value, we add in an explicit
+ // zero value to msgVals to match protoc (which also adds these
+ // in even if not present in source)
+ if !entry.Has(keyFld) {
+ // put key before value
+ value.msgVal = append(append(([]*interpretedField)(nil), zeroValue(keyFld)), value.msgVal...)
+ } + if !entry.Has(valFld) { + value.msgVal = append(value.msgVal, zeroValue(valFld)) + } + key := entry.Get(keyFld) + val := entry.Get(valFld) + if dm, ok := val.Interface().(*dynamicpb.Message); ok && (dm == nil || !dm.IsValid()) { + val = protoreflect.ValueOfMessage(dynamicpb.NewMessage(valFld.Message())) + } + m := msg.Mutable(fld).Map() + // TODO: error if key is already present + m.Set(key.MapKey(), val) +} + +// zeroValue returns the zero value for the field types as a *interpretedField. +// The given fld must NOT be a repeated field. +func zeroValue(fld protoreflect.FieldDescriptor) *interpretedField { + var val protoreflect.Value + var msgVal []*interpretedField + switch fld.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + // needs to be non-nil, but empty + msgVal = []*interpretedField{} + msg := dynamicpb.NewMessage(fld.Message()) + val = protoreflect.ValueOfMessage(msg) + case protoreflect.EnumKind: + val = protoreflect.ValueOfEnum(0) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + val = protoreflect.ValueOfInt32(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + val = protoreflect.ValueOfUint32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + val = protoreflect.ValueOfInt64(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + val = protoreflect.ValueOfUint64(0) + case protoreflect.BoolKind: + val = protoreflect.ValueOfBool(false) + case protoreflect.FloatKind: + val = protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + val = protoreflect.ValueOfFloat64(0) + case protoreflect.BytesKind: + val = protoreflect.ValueOfBytes(nil) + case protoreflect.StringKind: + val = protoreflect.ValueOfString("") + } + return &interpretedField{ + number: int32(fld.Number()), + kind: fld.Kind(), + value: interpretedFieldValue{ + val: val, + msgVal: msgVal, + }, + } +} + +type listValue []protoreflect.Value + +var _ protoreflect.List = (*listValue)(nil) + +func (l listValue) Len() int { + return len(l) +} + +func (l listValue) Get(i int) protoreflect.Value { + return l[i] +} + +func (l listValue) Set(i int, value protoreflect.Value) { + l[i] = value +} + +func (l *listValue) Append(value protoreflect.Value) { + *l = append(*l, value) +} + +func (l listValue) AppendMutable() protoreflect.Value { + panic("AppendMutable not supported") +} + +func (l *listValue) Truncate(i int) { + *l = (*l)[:i] +} + +func (l listValue) NewElement() protoreflect.Value { + panic("NewElement not supported") +} + +func (l listValue) IsValid() bool { + return true +} + +func fieldName(fld protoreflect.FieldDescriptor) string { + if fld.IsExtension() { + return fmt.Sprintf("(%s)", fld.FullName()) + } + return string(fld.Name()) +} + +func valueKind(val interface{}) string { + switch val := val.(type) { + case ast.Identifier: + return "identifier" + case bool: + return "bool" + case int64: + if val < 0 { + return "negative integer" + } + return "integer" + case uint64: + return "integer" + case float64: + return "double" + case string, []byte: + return "string" + case []*ast.MessageFieldNode: + return "message" + case []ast.ValueNode: + return "array" + default: + return fmt.Sprintf("%T", val) + } +} + +// fieldValue computes a compile-time value (constant or list or message literal) for the given +// AST node val. The value in val must be assignable to the field fld. 
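The zeroValue fill-in above mirrors protoc: a map entry that omits its key or value still serializes with an explicit zero. A toy sketch of that behavior, with pointers standing in for "omitted in source":

package main

import "fmt"

type entry struct {
	key   *string // nil means "omitted in source"
	value *int32
}

// fill mimics the zeroValue behavior above: an omitted key or value
// becomes an explicit zero, matching protoc's output.
func fill(e entry) (string, int32) {
	k, v := "", int32(0)
	if e.key != nil {
		k = *e.key
	}
	if e.value != nil {
		v = *e.value
	}
	return k, v
}

func main() {
	key := "k"
	k, v := fill(entry{key: &key}) // {key: "k"} with value omitted
	fmt.Printf("%q -> %d\n", k, v) // "k" -> 0
}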
+func (interp *interpreter) fieldValue(mc *internal.MessageContext, fld protoreflect.FieldDescriptor, val ast.ValueNode, insideMsgLiteral bool) (interpretedFieldValue, error) { + k := fld.Kind() + switch k { + case protoreflect.EnumKind: + num, _, err := interp.enumFieldValue(mc, fld.Enum(), val, insideMsgLiteral) + if err != nil { + return interpretedFieldValue{}, err + } + return interpretedFieldValue{val: protoreflect.ValueOfEnum(num)}, nil + + case protoreflect.MessageKind, protoreflect.GroupKind: + v := val.Value() + if aggs, ok := v.([]*ast.MessageFieldNode); ok { + fmd := fld.Message() + return interp.messageLiteralValue(mc, aggs, fmd) + } + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting message, got %s", mc, valueKind(v)) + + default: + v, err := interp.scalarFieldValue(mc, descriptorpb.FieldDescriptorProto_Type(k), val, insideMsgLiteral) + if err != nil { + return interpretedFieldValue{}, err + } + return interpretedFieldValue{val: protoreflect.ValueOf(v)}, nil + } +} + +// enumFieldValue resolves the given AST node val as an enum value descriptor. If the given +// value is not a valid identifier, an error is returned instead. +func (interp *interpreter) enumFieldValue(mc *internal.MessageContext, ed protoreflect.EnumDescriptor, val ast.ValueNode, allowNumber bool) (protoreflect.EnumNumber, protoreflect.Name, error) { + v := val.Value() + var num protoreflect.EnumNumber + switch v := v.(type) { + case ast.Identifier: + name := protoreflect.Name(v) + ev := ed.Values().ByName(name) + if ev == nil { + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%venum %s has no value named %s", mc, ed.FullName(), v) + } + return ev.Number(), name, nil + case int64: + if !allowNumber { + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting enum name, got %s", mc, valueKind(v)) + } + if v > math.MaxInt32 || v < math.MinInt32 { + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for an enum", mc, v) + } + num = protoreflect.EnumNumber(v) + case uint64: + if !allowNumber { + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting enum name, got %s", mc, valueKind(v)) + } + if v > math.MaxInt32 { + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for an enum", mc, v) + } + num = protoreflect.EnumNumber(v) + default: + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting enum, got %s", mc, valueKind(v)) + } + ev := ed.Values().ByNumber(num) + if ev != nil { + return num, ev.Name(), nil + } + if ed.Syntax() != protoreflect.Proto3 { + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vclosed enum %s has no value with number %d", mc, ed.FullName(), num) + } + // unknown value, but enum is open, so we allow it and return blank name + return num, "", nil +} + +// scalarFieldValue resolves the given AST node val as a value whose type is assignable to a +// field with the given fldType. 
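The enum handling in enumFieldValue above distinguishes closed (proto2) enums from open (proto3) enums. A standalone sketch of that acceptance rule, with a plain map standing in for the enum descriptor:

package main

import "fmt"

// checkEnumNumber sketches the open/closed rule: proto2 enums are
// closed (only declared numbers are valid), proto3 enums are open
// (any int32 is representable, with a blank name for unknown numbers).
func checkEnumNumber(known map[int32]string, num int32, proto3 bool) (string, error) {
	if name, ok := known[num]; ok {
		return name, nil
	}
	if !proto3 {
		return "", fmt.Errorf("closed enum has no value with number %d", num)
	}
	return "", nil // open enum: unknown number allowed, blank name
}

func main() {
	known := map[int32]string{0: "UNKNOWN", 1: "STARTED"}
	fmt.Println(checkEnumNumber(known, 5, false)) // error (proto2, closed)
	fmt.Println(checkEnumNumber(known, 5, true))  // accepted (proto3, open)
}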
+func (interp *interpreter) scalarFieldValue(mc *internal.MessageContext, fldType descriptorpb.FieldDescriptorProto_Type, val ast.ValueNode, insideMsgLiteral bool) (interface{}, error) { + v := val.Value() + switch fldType { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if b, ok := v.(bool); ok { + return b, nil + } + if id, ok := v.(ast.Identifier); ok { + if insideMsgLiteral { + // inside a message literal, values use the protobuf text format, + // which is lenient in that it accepts "t" and "f" or "True" and "False" + switch id { + case "t", "true", "True": + return true, nil + case "f", "false", "False": + return false, nil + } + } else { + // options with simple scalar values (no message literal) are stricter + switch id { + case "true": + return true, nil + case "false": + return false, nil + } + } + } + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting bool, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + if str, ok := v.(string); ok { + return []byte(str), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting bytes, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + if str, ok := v.(string); ok { + return str, nil + } + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting string, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_INT32, descriptorpb.FieldDescriptorProto_TYPE_SINT32, descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if i, ok := v.(int64); ok { + if i > math.MaxInt32 || i < math.MinInt32 { + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for int32", mc, i) + } + return int32(i), nil + } + if ui, ok := v.(uint64); ok { + if ui > math.MaxInt32 { + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for int32", mc, ui) + } + return int32(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting int32, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if i, ok := v.(int64); ok { + if i > math.MaxUint32 || i < 0 { + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for uint32", mc, i) + } + return uint32(i), nil + } + if ui, ok := v.(uint64); ok { + if ui > math.MaxUint32 { + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for uint32", mc, ui) + } + return uint32(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting uint32, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_INT64, descriptorpb.FieldDescriptorProto_TYPE_SINT64, descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + if i, ok := v.(int64); ok { + return i, nil + } + if ui, ok := v.(uint64); ok { + if ui > math.MaxInt64 { + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for int64", mc, ui) + } + return int64(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting int64, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + if i, ok := v.(int64); ok { + if i < 0 { + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for uint64", mc, i) + } + return uint64(i), nil + } + if ui, ok := v.(uint64); ok { + return ui, nil + } + return nil, 
reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting uint64, got %s", mc, valueKind(v))
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
+ if id, ok := v.(ast.Identifier); ok {
+ switch id {
+ case "inf":
+ return math.Inf(1), nil
+ case "nan":
+ return math.NaN(), nil
+ }
+ }
+ if d, ok := v.(float64); ok {
+ return d, nil
+ }
+ if i, ok := v.(int64); ok {
+ return float64(i), nil
+ }
+ if u, ok := v.(uint64); ok {
+ return float64(u), nil
+ }
+ return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting double, got %s", mc, valueKind(v))
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
+ if id, ok := v.(ast.Identifier); ok {
+ switch id {
+ case "inf":
+ return float32(math.Inf(1)), nil
+ case "nan":
+ return float32(math.NaN()), nil
+ }
+ }
+ if d, ok := v.(float64); ok {
+ return float32(d), nil
+ }
+ if i, ok := v.(int64); ok {
+ return float32(i), nil
+ }
+ if u, ok := v.(uint64); ok {
+ return float32(u), nil
+ }
+ return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting float, got %s", mc, valueKind(v))
+ default:
+ return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vunrecognized field type: %s", mc, fldType)
+ }
+}
+
+func descriptorType(m proto.Message) string {
+ switch m := m.(type) {
+ case *descriptorpb.DescriptorProto:
+ return "message"
+ case *descriptorpb.DescriptorProto_ExtensionRange:
+ return "extension range"
+ case *descriptorpb.FieldDescriptorProto:
+ if m.GetExtendee() == "" {
+ return "field"
+ }
+ return "extension"
+ case *descriptorpb.EnumDescriptorProto:
+ return "enum"
+ case *descriptorpb.EnumValueDescriptorProto:
+ return "enum value"
+ case *descriptorpb.ServiceDescriptorProto:
+ return "service"
+ case *descriptorpb.MethodDescriptorProto:
+ return "method"
+ case *descriptorpb.FileDescriptorProto:
+ return "file"
+ default:
+ // shouldn't be possible
+ return fmt.Sprintf("%T", m)
+ }
+}
+
+func (interp *interpreter) messageLiteralValue(mc *internal.MessageContext, fieldNodes []*ast.MessageFieldNode, fmd protoreflect.MessageDescriptor) (interpretedFieldValue, error) {
+ fdm := dynamicpb.NewMessage(fmd)
+ origPath := mc.OptAggPath
+ defer func() {
+ mc.OptAggPath = origPath
+ }()
+ // NB: we don't want to leave this nil, even if the
+ // message is empty, because that indicates to
+ // caller that the result is not a message
+ flds := make([]*interpretedField, 0, len(fieldNodes))
+ var foundAnyNode bool
+ for _, fieldNode := range fieldNodes {
+ if origPath == "" {
+ mc.OptAggPath = fieldNode.Name.Value()
+ } else {
+ mc.OptAggPath = origPath + "." + fieldNode.Name.Value()
+ }
+ if fieldNode.Name.IsAnyTypeReference() {
+ if fmd.FullName() != "google.protobuf.Any" {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vtype references are only allowed for google.protobuf.Any, but this type is %s", mc, fmd.FullName())
+ }
+ if foundAnyNode {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vmultiple any type references are not allowed", mc)
+ }
+ foundAnyNode = true
+ urlPrefix := fieldNode.Name.URLPrefix.AsIdentifier()
+ msgName := fieldNode.Name.Name.AsIdentifier()
+ fullURL := fmt.Sprintf("%s/%s", urlPrefix, msgName)
+ // TODO: Support other URLs dynamically -- the caller of protoparse
+ // should be able to provide a custom resolver that can resolve type
+ // URLs into message descriptors. The default resolver would be
+ // implemented as below, only accepting "type.googleapis.com" and
+ // "type.googleprod.com" as hosts/prefixes and using the compiled
+ // file's transitive closure to find the named message.
+ if urlPrefix != "type.googleapis.com" && urlPrefix != "type.googleprod.com" {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vcould not resolve type reference %s", mc, fullURL)
+ }
+ anyFields, ok := fieldNode.Val.Value().([]*ast.MessageFieldNode)
+ if !ok {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Val).Start(), "%vtype references for google.protobuf.Any must have message literal value", mc)
+ }
+ anyMd := interp.file.ResolveMessageType(protoreflect.FullName(msgName))
+ if anyMd == nil {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vcould not resolve type reference %s", mc, fullURL)
+ }
+ // parse the message value
+ msgVal, err := interp.messageLiteralValue(mc, anyFields, anyMd)
+ if err != nil {
+ return interpretedFieldValue{}, err
+ }
+
+ // Any is defined with two fields:
+ // string type_url = 1
+ // bytes value = 2
+ typeURLDescriptor := fmd.Fields().ByNumber(1)
+ if typeURLDescriptor == nil || typeURLDescriptor.Kind() != protoreflect.StringKind {
+ // NB: err is nil at this point, so there is nothing to wrap; report the malformed descriptor directly
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfailed to set type_url string field on Any: field is missing or not a string", mc)
+ }
+ fdm.Set(typeURLDescriptor, protoreflect.ValueOfString(fullURL))
+ valueDescriptor := fmd.Fields().ByNumber(2)
+ if valueDescriptor == nil || valueDescriptor.Kind() != protoreflect.BytesKind {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfailed to set value bytes field on Any: field is missing or not bytes", mc)
+ }
+ b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msgVal.val.Message().Interface())
+ if err != nil {
+ return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Val).Start(), "%vfailed to serialize message value: %w", mc, err)
+ }
+ fdm.Set(valueDescriptor, protoreflect.ValueOfBytes(b))
+ } else {
+ var ffld protoreflect.FieldDescriptor
+ if fieldNode.Name.IsExtension() {
+ n := interp.file.ResolveMessageLiteralExtensionName(fieldNode.Name.Name)
+ if n == "" {
+ // this should not be possible!
+ n = string(fieldNode.Name.Name.AsIdentifier())
+ }
+ ffld = interp.file.ResolveExtension(protoreflect.FullName(n))
+ if ffld == nil {
+ // may need to qualify with package name
+ // (this should not be necessary!)
+ pkg := mc.File.FileDescriptorProto().GetPackage()
+ if pkg != "" {
+ ffld = interp.file.ResolveExtension(protoreflect.FullName(pkg + "." + n))
+ }
+ }
+ } else {
+ ffld = fmd.Fields().ByName(protoreflect.Name(fieldNode.Name.Value()))
+ // Groups are indicated in the text format by the group name (which is
+ // camel-case), NOT the field name (which is lower-case).
+ // ...but only regular fields, not extensions that are groups...
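// A hedged aside: the two-step lookup performed below can be sketched in
// isolation. findGroupField is a hypothetical helper, not part of this
// package; only the protoreflect calls it makes are real:
//
//	func findGroupField(md protoreflect.MessageDescriptor, name string) protoreflect.FieldDescriptor {
//		if fd := md.Fields().ByName(protoreflect.Name(name)); fd != nil {
//			return fd // ordinary field, matched by its lower-case field name
//		}
//		for i := 0; i < md.Fields().Len(); i++ {
//			if fd := md.Fields().Get(i); fd.Kind() == protoreflect.GroupKind &&
//				fd.Message().Name() == protoreflect.Name(name) {
//				return fd // group, matched by its camel-case group (message) name
//			}
//		}
//		return nil
//	}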
+ if ffld != nil && ffld.Kind() == protoreflect.GroupKind && ffld.Message().Name() != protoreflect.Name(fieldNode.Name.Value()) { + // this is kind of silly to fail here, but this mimics protoc behavior + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfield %s not found (did you mean the group named %s?)", mc, fieldNode.Name.Value(), ffld.Message().Name()) + } + if ffld == nil { + // could be a group name + for i := 0; i < fmd.Fields().Len(); i++ { + fd := fmd.Fields().Get(i) + if fd.Kind() == protoreflect.GroupKind && fd.Message().Name() == protoreflect.Name(fieldNode.Name.Value()) { + // found it! + ffld = fd + break + } + } + } + } + if ffld == nil { + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfield %s not found", mc, string(fieldNode.Name.Name.AsIdentifier())) + } + if fieldNode.Sep == nil && ffld.Message() == nil { + // If there is no separator, the field type should be a message. + // Otherwise it is an error in the text format. + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Val).Start(), "syntax error: unexpected value, expecting ':'") + } + res, err := interp.setOptionField(mc, fdm, ffld, fieldNode.Name, fieldNode.Val, true) + if err != nil { + return interpretedFieldValue{}, err + } + flds = append(flds, &interpretedField{ + number: int32(ffld.Number()), + kind: ffld.Kind(), + repeated: ffld.Cardinality() == protoreflect.Repeated, + packed: ffld.IsPacked(), + value: res, + // NB: no need to set index here, inside message literal + // (it is only used for top-level options, for emitting + // source code info) + }) + } + } + return interpretedFieldValue{ + val: protoreflect.ValueOfMessage(fdm), + msgVal: flds, + }, nil +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/.gitignore b/vendor/github.com/bufbuild/protocompile/parser/.gitignore new file mode 100644 index 00000000..26520536 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/.gitignore @@ -0,0 +1 @@ +y.output diff --git a/vendor/github.com/bufbuild/protocompile/parser/ast.go b/vendor/github.com/bufbuild/protocompile/parser/ast.go new file mode 100644 index 00000000..105502d4 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/ast.go @@ -0,0 +1,216 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
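The accumulator types defined just below convert the parser's singly linked lists into slices of AST nodes. A hedged, self-contained sketch of that two-pass pattern, with illustrative names rather than this package's types:

package main

import "fmt"

// node mirrors the shape of the accumulator types: a value plus a link.
type node struct {
	val  string
	next *node
}

// toSlice uses the same conversion the toNodes methods use:
// count first, allocate exactly, then fill in order.
func toSlice(list *node) []string {
	l := 0
	for cur := list; cur != nil; cur = cur.next {
		l++
	}
	out := make([]string, l)
	for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 {
		out[i] = cur.val
	}
	return out
}

func main() {
	list := &node{"a", &node{"b", &node{"c", nil}}}
	fmt.Println(toSlice(list)) // [a b c]
}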
+ +package parser + +import "github.com/bufbuild/protocompile/ast" + +// the types below are accumulator types: linked lists that are +// constructed during parsing and then converted to slices of AST nodes +// once the whole list has been parsed +// TODO: change grammar to use slices of nodes instead of these constructions + +type compactOptionList struct { + option *ast.OptionNode + comma *ast.RuneNode + next *compactOptionList +} + +func (list *compactOptionList) toNodes() ([]*ast.OptionNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + opts := make([]*ast.OptionNode, l) + commas := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + opts[i] = cur.option + if cur.comma != nil { + commas[i] = cur.comma + } + } + return opts, commas +} + +type stringList struct { + str *ast.StringLiteralNode + next *stringList +} + +func (list *stringList) toStringValueNode() ast.StringValueNode { + if list.next == nil { + // single name + return list.str + } + + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + strs := make([]*ast.StringLiteralNode, l) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + strs[i] = cur.str + } + return ast.NewCompoundLiteralStringNode(strs...) +} + +type nameList struct { + name ast.StringValueNode + comma *ast.RuneNode + next *nameList +} + +func (list *nameList) toNodes() ([]ast.StringValueNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + names := make([]ast.StringValueNode, l) + commas := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + names[i] = cur.name + if cur.comma != nil { + commas[i] = cur.comma + } + } + return names, commas +} + +type rangeList struct { + rng *ast.RangeNode + comma *ast.RuneNode + next *rangeList +} + +func (list *rangeList) toNodes() ([]*ast.RangeNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + ranges := make([]*ast.RangeNode, l) + commas := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + ranges[i] = cur.rng + if cur.comma != nil { + commas[i] = cur.comma + } + } + return ranges, commas +} + +type valueList struct { + val ast.ValueNode + comma *ast.RuneNode + next *valueList +} + +func (list *valueList) toNodes() ([]ast.ValueNode, []*ast.RuneNode) { + if list == nil { + return nil, nil + } + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + vals := make([]ast.ValueNode, l) + commas := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + vals[i] = cur.val + if cur.comma != nil { + commas[i] = cur.comma + } + } + return vals, commas +} + +type fieldRefList struct { + ref *ast.FieldReferenceNode + dot *ast.RuneNode + next *fieldRefList +} + +func (list *fieldRefList) toNodes() ([]*ast.FieldReferenceNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + refs := make([]*ast.FieldReferenceNode, l) + dots := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + refs[i] = cur.ref + if cur.dot != nil { + dots[i] = cur.dot + } + } + + return refs, dots +} + +type identList struct { + ident *ast.IdentNode + dot *ast.RuneNode + next *identList +} + +func (list *identList) toIdentValueNode(leadingDot *ast.RuneNode) ast.IdentValueNode { + if list.next == nil && leadingDot == nil { + // single name + return list.ident + } + + l := 0 + for cur := 
list; cur != nil; cur = cur.next { + l++ + } + idents := make([]*ast.IdentNode, l) + dots := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + idents[i] = cur.ident + if cur.dot != nil { + dots[i] = cur.dot + } + } + + return ast.NewCompoundIdentNode(leadingDot, idents, dots) +} + +type messageFieldEntry struct { + field *ast.MessageFieldNode + delimiter *ast.RuneNode +} + +type messageFieldList struct { + field *messageFieldEntry + next *messageFieldList +} + +func (list *messageFieldList) toNodes() ([]*ast.MessageFieldNode, []*ast.RuneNode) { + if list == nil { + return nil, nil + } + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + fields := make([]*ast.MessageFieldNode, l) + delimiters := make([]*ast.RuneNode, l) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + fields[i] = cur.field.field + if cur.field.delimiter != nil { + delimiters[i] = cur.field.delimiter + } + } + + return fields, delimiters +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/clone.go b/vendor/github.com/bufbuild/protocompile/parser/clone.go new file mode 100644 index 00000000..7c5505f2 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/clone.go @@ -0,0 +1,182 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" +) + +// Clone returns a copy of the given result. Since descriptor protos may be +// mutated during linking, this can return a defensive copy so that mutations +// don't impact concurrent operations in an unsafe way. This is called if the +// parse result could be re-used across concurrent operations and has unresolved +// references and options which will require mutation by the linker. +// +// If the given value has a method with the following signature, it will be +// called to perform the operation: +// +// Clone() Result +// +// If the given value does not provide a Clone method and is not the implementation +// provided by this package, it is possible for an error to occur in creating the +// copy, which may result in a panic. This can happen if the AST of the given result +// is not actually valid and a file descriptor proto cannot be successfully derived +// from it. +func Clone(r Result) Result { + if cl, ok := r.(interface{ Clone() Result }); ok { + return cl.Clone() + } + if res, ok := r.(*result); ok { + newProto := proto.Clone(res.proto).(*descriptorpb.FileDescriptorProto) //nolint:errcheck + newNodes := make(map[proto.Message]ast.Node, len(res.nodes)) + newResult := &result{ + file: res.file, + proto: newProto, + nodes: newNodes, + } + recreateNodeIndexForFile(res, newResult, res.proto, newProto) + return newResult + } + + // Can't do the deep-copy we know how to do. So we have to take a + // different tactic. 
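// A hedged aside: the reason a deep copy suffices above can be shown with a
// tiny illustrative snippet (not part of this function). proto.Clone yields a
// fully independent message, so mutating the copy cannot affect concurrent
// readers of the original:
//
//	orig := &descriptorpb.FileDescriptorProto{Name: proto.String("test.proto")}
//	cp := proto.Clone(orig).(*descriptorpb.FileDescriptorProto)
//	cp.Name = proto.String("copy.proto")
//	// orig.GetName() is still "test.proto"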
+ if r.AST() == nil { + // no AST? all we have to do is copy the proto + fileProto := proto.Clone(r.FileDescriptorProto()).(*descriptorpb.FileDescriptorProto) //nolint:errcheck + return ResultWithoutAST(fileProto) + } + // Otherwise, we have an AST, but no way to clone the result's + // internals. So just re-create them from scratch. + res, err := ResultFromAST(r.AST(), false, reporter.NewHandler(nil)) + if err != nil { + panic(err) + } + return res +} + +func recreateNodeIndexForFile(orig, clone *result, origProto, cloneProto *descriptorpb.FileDescriptorProto) { + updateNodeIndexWithOptions[*descriptorpb.FileOptions](orig, clone, origProto, cloneProto) + for i, origMd := range origProto.MessageType { + cloneMd := cloneProto.MessageType[i] + recreateNodeIndexForMessage(orig, clone, origMd, cloneMd) + } + for i, origEd := range origProto.EnumType { + cloneEd := cloneProto.EnumType[i] + recreateNodeIndexForEnum(orig, clone, origEd, cloneEd) + } + for i, origExtd := range origProto.Extension { + cloneExtd := cloneProto.Extension[i] + updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origExtd, cloneExtd) + } + for i, origSd := range origProto.Service { + cloneSd := cloneProto.Service[i] + updateNodeIndexWithOptions[*descriptorpb.ServiceOptions](orig, clone, origSd, cloneSd) + for j, origMtd := range origSd.Method { + cloneMtd := cloneSd.Method[j] + updateNodeIndexWithOptions[*descriptorpb.MethodOptions](orig, clone, origMtd, cloneMtd) + } + } +} + +func recreateNodeIndexForMessage(orig, clone *result, origProto, cloneProto *descriptorpb.DescriptorProto) { + updateNodeIndexWithOptions[*descriptorpb.MessageOptions](orig, clone, origProto, cloneProto) + for i, origFld := range origProto.Field { + cloneFld := cloneProto.Field[i] + updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origFld, cloneFld) + } + for i, origOod := range origProto.OneofDecl { + cloneOod := cloneProto.OneofDecl[i] + updateNodeIndexWithOptions[*descriptorpb.OneofOptions](orig, clone, origOod, cloneOod) + } + for i, origExtr := range origProto.ExtensionRange { + cloneExtr := cloneProto.ExtensionRange[i] + updateNodeIndexWithOptions[*descriptorpb.ExtensionRangeOptions](orig, clone, origExtr, cloneExtr) + } + for i, origRr := range origProto.ReservedRange { + cloneRr := cloneProto.ReservedRange[i] + updateNodeIndex(orig, clone, origRr, cloneRr) + } + for i, origNmd := range origProto.NestedType { + cloneNmd := cloneProto.NestedType[i] + recreateNodeIndexForMessage(orig, clone, origNmd, cloneNmd) + } + for i, origEd := range origProto.EnumType { + cloneEd := cloneProto.EnumType[i] + recreateNodeIndexForEnum(orig, clone, origEd, cloneEd) + } + for i, origExtd := range origProto.Extension { + cloneExtd := cloneProto.Extension[i] + updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origExtd, cloneExtd) + } +} + +func recreateNodeIndexForEnum(orig, clone *result, origProto, cloneProto *descriptorpb.EnumDescriptorProto) { + updateNodeIndexWithOptions[*descriptorpb.EnumOptions](orig, clone, origProto, cloneProto) + for i, origEvd := range origProto.Value { + cloneEvd := cloneProto.Value[i] + updateNodeIndexWithOptions[*descriptorpb.EnumValueOptions](orig, clone, origEvd, cloneEvd) + } + for i, origRr := range origProto.ReservedRange { + cloneRr := cloneProto.ReservedRange[i] + updateNodeIndex(orig, clone, origRr, cloneRr) + } +} + +func recreateNodeIndexForOptions(orig, clone *result, origProtos, cloneProtos []*descriptorpb.UninterpretedOption) { + for i, origOpt := range origProtos 
{ + cloneOpt := cloneProtos[i] + updateNodeIndex(orig, clone, origOpt, cloneOpt) + for j, origName := range origOpt.Name { + cloneName := cloneOpt.Name[j] + updateNodeIndex(orig, clone, origName, cloneName) + } + } +} + +func updateNodeIndex[M proto.Message](orig, clone *result, origProto, cloneProto M) { + node := orig.nodes[origProto] + if node != nil { + clone.nodes[cloneProto] = node + } +} + +type pointerMessage[T any] interface { + *T + proto.Message +} + +type options[T any] interface { + // need this type instead of just proto.Message so we can check for nil pointer + pointerMessage[T] + GetUninterpretedOption() []*descriptorpb.UninterpretedOption +} + +type withOptions[O options[T], T any] interface { + proto.Message + GetOptions() O +} + +func updateNodeIndexWithOptions[O options[T], M withOptions[O, T], T any](orig, clone *result, origProto, cloneProto M) { + updateNodeIndex(orig, clone, origProto, cloneProto) + origOpts := origProto.GetOptions() + cloneOpts := cloneProto.GetOptions() + if origOpts != nil { + recreateNodeIndexForOptions(orig, clone, origOpts.GetUninterpretedOption(), cloneOpts.GetUninterpretedOption()) + } +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/doc.go b/vendor/github.com/bufbuild/protocompile/parser/doc.go new file mode 100644 index 00000000..d4e2c75a --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/doc.go @@ -0,0 +1,25 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package parser contains the logic for parsing protobuf source code into an +// AST (abstract syntax tree) and also for converting an AST into a descriptor +// proto. +// +// A FileDescriptorProto is very similar to an AST, but the AST this package +// uses is more useful because it contains more information about the source +// code, including details about whitespace and comments, that cannot be +// represented by a descriptor proto. This makes it ideal for things like +// code formatters, which may want to preserve things like whitespace and +// comment format. +package parser diff --git a/vendor/github.com/bufbuild/protocompile/parser/errors.go b/vendor/github.com/bufbuild/protocompile/parser/errors.go new file mode 100644 index 00000000..6e34bb8c --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/errors.go @@ -0,0 +1,22 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
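A hedged, self-contained sketch of the pointer-constraint generics pattern used by options[T] and updateNodeIndexWithOptions in clone.go above. All names here are illustrative, not from this package:

package main

import "fmt"

// getter stands in for a GetUninterpretedOption-style accessor.
type getter interface{ Get() string }

// pointerTo constrains a type parameter to *T while still exposing methods,
// which is what lets a caller detect a typed nil before calling them.
type pointerTo[T any] interface {
	*T
	getter
}

type opts struct{ s string }

func (o *opts) Get() string { return o.s }

func describe[P pointerTo[T], T any](p P) {
	if p == nil { // valid comparison: P's type set contains only the pointer type *T
		fmt.Println("no options")
		return
	}
	fmt.Println(p.Get())
}

func main() {
	describe[*opts, opts](nil)
	describe[*opts, opts](&opts{s: "uninterpreted_option"})
}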
+ +package parser + +import "errors" + +// ErrNoSyntax is a sentinel error that may be passed to a warning reporter. +// The error the reporter receives will be wrapped with source position that +// indicates the file that had no syntax statement. +var ErrNoSyntax = errors.New("no syntax specified; defaulting to proto2 syntax") diff --git a/vendor/github.com/bufbuild/protocompile/parser/lexer.go b/vendor/github.com/bufbuild/protocompile/parser/lexer.go new file mode 100644 index 00000000..e4701faa --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/lexer.go @@ -0,0 +1,760 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "math" + "strconv" + "strings" + "unicode/utf8" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" +) + +type runeReader struct { + data []byte + pos int + err error + mark int + // Enable this check to make input required to be valid UTF-8. + // For now, since protoc allows invalid UTF-8, default to false. + utf8Strict bool +} + +func (rr *runeReader) readRune() (r rune, size int, err error) { + if rr.err != nil { + return 0, 0, rr.err + } + if rr.pos == len(rr.data) { + rr.err = io.EOF + return 0, 0, rr.err + } + r, sz := utf8.DecodeRune(rr.data[rr.pos:]) + if rr.utf8Strict && r == utf8.RuneError { + rr.err = fmt.Errorf("invalid UTF8 at offset %d: %x", rr.pos, rr.data[rr.pos]) + return 0, 0, rr.err + } + rr.pos += sz + return r, sz, nil +} + +func (rr *runeReader) offset() int { + return rr.pos +} + +func (rr *runeReader) unreadRune(sz int) { + newPos := rr.pos - sz + if newPos < rr.mark { + panic("unread past mark") + } + rr.pos = newPos +} + +func (rr *runeReader) setMark() { + rr.mark = rr.pos +} + +func (rr *runeReader) getMark() string { + return string(rr.data[rr.mark:rr.pos]) +} + +type protoLex struct { + input *runeReader + info *ast.FileInfo + handler *reporter.Handler + res *ast.FileNode + + prevSym ast.TerminalNode + prevOffset int + eof ast.Token + + comments []ast.Token +} + +var utf8Bom = []byte{0xEF, 0xBB, 0xBF} + +func newLexer(in io.Reader, filename string, handler *reporter.Handler) (*protoLex, error) { + br := bufio.NewReader(in) + + // if file has UTF8 byte order marker preface, consume it + marker, err := br.Peek(3) + if err == nil && bytes.Equal(marker, utf8Bom) { + _, _ = br.Discard(3) + } + + contents, err := io.ReadAll(br) + if err != nil { + return nil, err + } + return &protoLex{ + input: &runeReader{data: contents}, + info: ast.NewFileInfo(filename, contents), + handler: handler, + }, nil +} + +var keywords = map[string]int{ + "syntax": _SYNTAX, + "import": _IMPORT, + "weak": _WEAK, + "public": _PUBLIC, + "package": _PACKAGE, + "option": _OPTION, + "true": _TRUE, + "false": _FALSE, + "inf": _INF, + "nan": _NAN, + "repeated": _REPEATED, + "optional": _OPTIONAL, + "required": _REQUIRED, + "double": _DOUBLE, + "float": _FLOAT, + "int32": _INT32, + "int64": 
_INT64, + "uint32": _UINT32, + "uint64": _UINT64, + "sint32": _SINT32, + "sint64": _SINT64, + "fixed32": _FIXED32, + "fixed64": _FIXED64, + "sfixed32": _SFIXED32, + "sfixed64": _SFIXED64, + "bool": _BOOL, + "string": _STRING, + "bytes": _BYTES, + "group": _GROUP, + "oneof": _ONEOF, + "map": _MAP, + "extensions": _EXTENSIONS, + "to": _TO, + "max": _MAX, + "reserved": _RESERVED, + "enum": _ENUM, + "message": _MESSAGE, + "extend": _EXTEND, + "service": _SERVICE, + "rpc": _RPC, + "stream": _STREAM, + "returns": _RETURNS, +} + +func (l *protoLex) maybeNewLine(r rune) { + if r == '\n' { + l.info.AddLine(l.input.offset()) + } +} + +func (l *protoLex) prev() ast.SourcePos { + return l.info.SourcePos(l.prevOffset) +} + +func (l *protoLex) Lex(lval *protoSymType) int { + if l.handler.ReporterError() != nil { + // if error reporter already returned non-nil error, + // we can skip the rest of the input + return 0 + } + + l.comments = nil + + for { + l.input.setMark() + + l.prevOffset = l.input.offset() + c, _, err := l.input.readRune() + if err == io.EOF { + // we're not actually returning a rune, but this will associate + // accumulated comments as a trailing comment on last symbol + // (if appropriate) + l.setRune(lval, 0) + l.eof = lval.b.Token() + return 0 + } + if err != nil { + l.setError(lval, err) + return _ERROR + } + + if strings.ContainsRune("\n\r\t\f\v ", c) { + // skip whitespace + l.maybeNewLine(c) + continue + } + + if c == '.' { + // decimal literals could start with a dot + cn, szn, err := l.input.readRune() + if err != nil { + l.setRune(lval, c) + return int(c) + } + if cn >= '0' && cn <= '9' { + l.readNumber() + token := l.input.getMark() + f, err := parseFloat(token) + if err != nil { + l.setError(lval, numError(err, "float", token)) + return _ERROR + } + l.setFloat(lval, f) + return _FLOAT_LIT + } + l.input.unreadRune(szn) + l.setRune(lval, c) + return int(c) + } + + if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') { + // identifier + l.readIdentifier() + str := l.input.getMark() + if t, ok := keywords[str]; ok { + l.setIdent(lval, str) + return t + } + l.setIdent(lval, str) + return _NAME + } + + if c >= '0' && c <= '9' { + // integer or float literal + l.readNumber() + token := l.input.getMark() + if strings.HasPrefix(token, "0x") || strings.HasPrefix(token, "0X") { + // hexadecimal + ui, err := strconv.ParseUint(token[2:], 16, 64) + if err != nil { + l.setError(lval, numError(err, "hexadecimal integer", token[2:])) + return _ERROR + } + l.setInt(lval, ui) + return _INT_LIT + } + if strings.ContainsAny(token, ".eE") { + // floating point! + f, err := parseFloat(token) + if err != nil { + l.setError(lval, numError(err, "float", token)) + return _ERROR + } + l.setFloat(lval, f) + return _FLOAT_LIT + } + // integer! 
(decimal or octal)
+ base := 10
+ if token[0] == '0' {
+ base = 8
+ }
+ ui, err := strconv.ParseUint(token, base, 64)
+ if err != nil {
+ kind := "integer"
+ if base == 8 {
+ kind = "octal integer"
+ } else if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange {
+ // if it's too big to be an int, parse it as a float
+ var f float64
+ kind = "float"
+ f, err = parseFloat(token)
+ if err == nil {
+ l.setFloat(lval, f)
+ return _FLOAT_LIT
+ }
+ }
+ l.setError(lval, numError(err, kind, token))
+ return _ERROR
+ }
+ l.setInt(lval, ui)
+ return _INT_LIT
+ }
+
+ if c == '\'' || c == '"' {
+ // string literal
+ str, err := l.readStringLiteral(c)
+ if err != nil {
+ l.setError(lval, err)
+ return _ERROR
+ }
+ l.setString(lval, str)
+ return _STRING_LIT
+ }
+
+ if c == '/' {
+ // comment
+ cn, szn, err := l.input.readRune()
+ if err != nil {
+ l.setRune(lval, '/')
+ return int(c)
+ }
+ if cn == '/' {
+ if hasErr := l.skipToEndOfLineComment(lval); hasErr {
+ return _ERROR
+ }
+ l.comments = append(l.comments, l.newToken())
+ continue
+ }
+ if cn == '*' {
+ ok, hasErr := l.skipToEndOfBlockComment(lval)
+ if hasErr {
+ return _ERROR
+ }
+ if !ok {
+ l.setError(lval, errors.New("block comment never terminates, unexpected EOF"))
+ return _ERROR
+ }
+ l.comments = append(l.comments, l.newToken())
+ continue
+ }
+ l.input.unreadRune(szn)
+ }
+
+ if c < 32 || c == 127 {
+ l.setError(lval, errors.New("invalid control character"))
+ return _ERROR
+ }
+ if !strings.ContainsRune(";,.:=-+(){}[]<>/", c) {
+ l.setError(lval, errors.New("invalid character"))
+ return _ERROR
+ }
+ l.setRune(lval, c)
+ return int(c)
+ }
+}
+
+func parseFloat(token string) (float64, error) {
+ // strconv.ParseFloat allows _ to separate digits, but protobuf does not
+ if strings.ContainsRune(token, '_') {
+ return 0, &strconv.NumError{
+ Func: "parseFloat",
+ Num: token,
+ Err: strconv.ErrSyntax,
+ }
+ }
+ f, err := strconv.ParseFloat(token, 64)
+ if err == nil {
+ return f, nil
+ }
+ if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange && math.IsInf(f, 1) {
+ // protoc doesn't complain about float overflow and instead just uses "infinity"
+ // so we mirror that behavior by just returning infinity and ignoring the error
+ return f, nil
+ }
+ return f, err
+}
+
+func (l *protoLex) newToken() ast.Token {
+ offset := l.input.mark
+ length := l.input.pos - l.input.mark
+ return l.info.AddToken(offset, length)
+}
+
+func (l *protoLex) setPrevAndAddComments(n ast.TerminalNode) {
+ comments := l.comments
+ l.comments = nil
+ var prevTrailingComments []ast.Token
+ if l.prevSym != nil && len(comments) > 0 {
+ prevEnd := l.info.NodeInfo(l.prevSym).End().Line
+ info := l.info.NodeInfo(n)
+ nStart := info.Start().Line
+ if nStart == prevEnd {
+ if rn, ok := n.(*ast.RuneNode); ok && rn.Rune == 0 {
+ // if current token is EOF, pretend it's on a separate line
+ // so that the logic below can attribute a final trailing
+ // comment to the previous token
+ nStart++
+ }
+ }
+ c := comments[0]
+ commentInfo := l.info.TokenInfo(c)
+ commentStart := commentInfo.Start().Line
+ if nStart > prevEnd && commentStart == prevEnd {
+ // Comment starts right after the previous token. If it's a
+ // line comment, we record that as a trailing comment.
+ //
+ // But if it's a block comment, it is only a trailing comment
+ // if there are multiple comments or if the block comment ends
+ // on a line before n.
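//
// A hedged illustration of this rule: in
//
//	foo = 1; // a line comment here is donated to the previous token
//
// the comment becomes a trailing comment, but in
//
//	foo = 1; /* a block comment that continues
//	onto the next line */ bar = 2;
//
// the block comment ends on the same line where the next token starts, so
// it is not donated and instead stays attached as a leading comment of
// that next token.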
+ canDonate := strings.HasPrefix(commentInfo.RawText(), "//") || + len(comments) > 1 || commentInfo.End().Line < nStart + + if canDonate { + prevTrailingComments = comments[:1] + comments = comments[1:] + } + } + } + + // now we can associate comments + for _, c := range prevTrailingComments { + l.info.AddComment(c, l.prevSym.Token()) + } + for _, c := range comments { + l.info.AddComment(c, n.Token()) + } + + l.prevSym = n +} + +func (l *protoLex) setString(lval *protoSymType, val string) { + lval.s = ast.NewStringLiteralNode(val, l.newToken()) + l.setPrevAndAddComments(lval.s) +} + +func (l *protoLex) setIdent(lval *protoSymType, val string) { + lval.id = ast.NewIdentNode(val, l.newToken()) + l.setPrevAndAddComments(lval.id) +} + +func (l *protoLex) setInt(lval *protoSymType, val uint64) { + lval.i = ast.NewUintLiteralNode(val, l.newToken()) + l.setPrevAndAddComments(lval.i) +} + +func (l *protoLex) setFloat(lval *protoSymType, val float64) { + lval.f = ast.NewFloatLiteralNode(val, l.newToken()) + l.setPrevAndAddComments(lval.f) +} + +func (l *protoLex) setRune(lval *protoSymType, val rune) { + lval.b = ast.NewRuneNode(val, l.newToken()) + l.setPrevAndAddComments(lval.b) +} + +func (l *protoLex) setError(lval *protoSymType, err error) { + lval.err, _ = l.addSourceError(err) +} + +func (l *protoLex) readNumber() { + allowExpSign := false + for { + c, sz, err := l.input.readRune() + if err != nil { + break + } + if (c == '-' || c == '+') && !allowExpSign { + l.input.unreadRune(sz) + break + } + allowExpSign = false + if c != '.' && c != '_' && (c < '0' || c > '9') && + (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && + c != '-' && c != '+' { + // no more chars in the number token + l.input.unreadRune(sz) + break + } + if c == 'e' || c == 'E' { + // scientific notation char can be followed by + // an exponent sign + allowExpSign = true + } + } +} + +func numError(err error, kind, s string) error { + ne, ok := err.(*strconv.NumError) + if !ok { + return err + } + if ne.Err == strconv.ErrRange { + return fmt.Errorf("value out of range for %s: %s", kind, s) + } + // syntax error + return fmt.Errorf("invalid syntax in %s value: %s", kind, s) +} + +func (l *protoLex) readIdentifier() { + for { + c, sz, err := l.input.readRune() + if err != nil { + break + } + if c != '_' && (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') { + l.input.unreadRune(sz) + break + } + } +} + +func (l *protoLex) readStringLiteral(quote rune) (string, error) { + var buf bytes.Buffer + var escapeError reporter.ErrorWithPos + var noMoreErrors bool + reportErr := func(msg, badEscape string) { + if noMoreErrors { + return + } + if escapeError != nil { + // report previous one + _, ok := l.addSourceError(escapeError) + if !ok { + noMoreErrors = true + } + } + var err error + if strings.HasSuffix(msg, "%s") { + err = fmt.Errorf(msg, badEscape) + } else { + err = errors.New(msg) + } + // we've now consumed the bad escape and lexer position is after it, so we need + // to back up to the beginning of the escape to report the correct position + escapeError = l.errWithCurrentPos(err, -len(badEscape)) + } + for { + c, _, err := l.input.readRune() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return "", err + } + if c == '\n' { + return "", errors.New("encountered end-of-line before end of string literal") + } + if c == quote { + break + } + if c == 0 { + reportErr("null character ('\\0') not allowed in string literal", string(rune(0))) + continue + } + if c == '\\' { + // escape 
sequence
+ c, _, err = l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ switch {
+ case c == 'x' || c == 'X':
+ // hex escape
+ c1, sz1, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ if c1 == quote || c1 == '\\' {
+ l.input.unreadRune(sz1)
+ reportErr("invalid hex escape: %s", "\\"+string(c))
+ continue
+ }
+ c2, sz2, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ var hex string
+ if (c2 < '0' || c2 > '9') && (c2 < 'a' || c2 > 'f') && (c2 < 'A' || c2 > 'F') {
+ l.input.unreadRune(sz2)
+ hex = string(c1)
+ } else {
+ hex = string([]rune{c1, c2})
+ }
+ i, err := strconv.ParseInt(hex, 16, 32)
+ if err != nil {
+ reportErr("invalid hex escape: %s", "\\"+string(c)+hex)
+ continue
+ }
+ buf.WriteByte(byte(i))
+ case c >= '0' && c <= '7':
+ // octal escape
+ c2, sz2, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ var octal string
+ if c2 < '0' || c2 > '7' {
+ l.input.unreadRune(sz2)
+ octal = string(c)
+ } else {
+ c3, sz3, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ if c3 < '0' || c3 > '7' {
+ l.input.unreadRune(sz3)
+ octal = string([]rune{c, c2})
+ } else {
+ octal = string([]rune{c, c2, c3})
+ }
+ }
+ i, err := strconv.ParseInt(octal, 8, 32)
+ if err != nil {
+ reportErr("invalid octal escape: %s", "\\"+octal)
+ continue
+ }
+ if i > 0xff {
+ reportErr("octal escape is out of range, must be between 0 and 377: %s", "\\"+octal)
+ continue
+ }
+ buf.WriteByte(byte(i))
+ case c == 'u':
+ // short unicode escape
+ u := make([]rune, 4)
+ for i := range u {
+ c2, sz2, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ if c2 == quote || c2 == '\\' {
+ l.input.unreadRune(sz2)
+ u = u[:i]
+ break
+ }
+ u[i] = c2
+ }
+ codepointStr := string(u)
+ if len(u) < 4 {
+ reportErr("invalid unicode escape: %s", "\\u"+codepointStr)
+ continue
+ }
+ i, err := strconv.ParseInt(codepointStr, 16, 32)
+ if err != nil {
+ reportErr("invalid unicode escape: %s", "\\u"+codepointStr)
+ continue
+ }
+ buf.WriteRune(rune(i))
+ case c == 'U':
+ // long unicode escape
+ u := make([]rune, 8)
+ for i := range u {
+ c2, sz2, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ if c2 == quote || c2 == '\\' {
+ l.input.unreadRune(sz2)
+ u = u[:i]
+ break
+ }
+ u[i] = c2
+ }
+ codepointStr := string(u)
+ if len(u) < 8 {
+ reportErr("invalid unicode escape: %s", "\\U"+codepointStr)
+ continue
+ }
+ i, err := strconv.ParseInt(string(u), 16, 32)
+ if err != nil {
+ reportErr("invalid unicode escape: %s", "\\U"+codepointStr)
+ continue
+ }
+ if i > 0x10ffff || i < 0 {
+ reportErr("unicode escape is out of range, must be between 0 and 0x10ffff: %s", "\\U"+codepointStr)
+ continue
+ }
+ buf.WriteRune(rune(i))
+ case c == 'a':
+ buf.WriteByte('\a')
+ case c == 'b':
+ buf.WriteByte('\b')
+ case c == 'f':
+ buf.WriteByte('\f')
+ case c == 'n':
+ buf.WriteByte('\n')
+ case c == 'r':
+ buf.WriteByte('\r')
+ case c == 't':
+ buf.WriteByte('\t')
+ case c == 'v':
+ buf.WriteByte('\v')
+ case c == '\\':
+ buf.WriteByte('\\')
+ case c == '\'':
+ buf.WriteByte('\'')
+ case c == '"':
+ buf.WriteByte('"')
+ case c == '?':
+ buf.WriteByte('?')
+ default:
+ reportErr("invalid escape sequence: %s", "\\"+string(c))
+ continue
+ }
+ } else {
+ buf.WriteRune(c)
+ }
+ }
+ if escapeError != nil {
+ return "", escapeError
+ }
+ return buf.String(), nil
+}
+
+func (l *protoLex) skipToEndOfLineComment(lval *protoSymType) (hasErr bool) {
+ for {
+ c, sz, err := l.input.readRune()
+ if err != nil {
+ // eof
+ return false
+ }
+ switch c {
case '\n': + // don't include newline in the comment + l.input.unreadRune(sz) + return false + case 0: + l.setError(lval, errors.New("invalid control character")) + return true + } + } +} + +func (l *protoLex) skipToEndOfBlockComment(lval *protoSymType) (ok, hasErr bool) { + for { + c, _, err := l.input.readRune() + if err != nil { + return false, false + } + if c == 0 { + l.setError(lval, errors.New("invalid control character")) + return false, true + } + l.maybeNewLine(c) + if c == '*' { + c, sz, err := l.input.readRune() + if err != nil { + return false, false + } + if c == '/' { + return true, false + } + l.input.unreadRune(sz) + } + } +} + +func (l *protoLex) addSourceError(err error) (reporter.ErrorWithPos, bool) { + ewp, ok := err.(reporter.ErrorWithPos) + if !ok { + ewp = reporter.Error(l.prev(), err) + } + handlerErr := l.handler.HandleError(ewp) + return ewp, handlerErr == nil +} + +func (l *protoLex) Error(s string) { + _, _ = l.addSourceError(errors.New(s)) +} + +func (l *protoLex) errWithCurrentPos(err error, offset int) reporter.ErrorWithPos { + if ewp, ok := err.(reporter.ErrorWithPos); ok { + return ewp + } + pos := l.info.SourcePos(l.input.offset() + offset) + return reporter.Error(pos, err) +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/parser.go b/vendor/github.com/bufbuild/protocompile/parser/parser.go new file mode 100644 index 00000000..c6818637 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/parser.go @@ -0,0 +1,193 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "io" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" +) + +//go:generate goyacc -o proto.y.go -l -p proto proto.y + +func init() { + protoErrorVerbose = true + + // fix up the generated "token name" array so that error messages are nicer + setTokenName(_STRING_LIT, "string literal") + setTokenName(_INT_LIT, "int literal") + setTokenName(_FLOAT_LIT, "float literal") + setTokenName(_NAME, "identifier") + setTokenName(_ERROR, "error") + // for keywords, just show the keyword itself wrapped in quotes + for str, i := range keywords { + setTokenName(i, fmt.Sprintf(`"%s"`, str)) + } +} + +func setTokenName(token int, text string) { + // NB: this is based on logic in generated parse code that translates the + // int returned from the lexer into an internal token number. 
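//
// A hedged aside: these friendlier token names show up in the syntax errors
// that reach the caller's reporter. A sketch of collecting several errors in
// one pass, assuming the reporter API as provided by this module's reporter
// package (`src` stands for any malformed input):
//
//	rep := reporter.NewReporter(func(err reporter.ErrorWithPos) error {
//		fmt.Println(err) // e.g. syntax error: unexpected ';', expecting "="
//		return nil       // returning nil tells the handler to keep parsing
//	}, nil)
//	_, err := Parse("demo.proto", strings.NewReader(src), reporter.NewHandler(rep))
//	// err is non-nil if anything was reported, but the AST may still be non-nil.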
+ var intern int8
+ if token < len(protoTok1) {
+ intern = protoTok1[token]
+ } else {
+ if token >= protoPrivate {
+ if token < protoPrivate+len(protoTok2) {
+ intern = protoTok2[token-protoPrivate]
+ }
+ }
+ if intern == 0 {
+ for i := 0; i+1 < len(protoTok3); i += 2 {
+ if int(protoTok3[i]) == token {
+ intern = protoTok3[i+1]
+ break
+ }
+ }
+ }
+ }
+
+ if intern >= 1 && int(intern-1) < len(protoToknames) {
+ protoToknames[intern-1] = text
+ return
+ }
+
+ panic(fmt.Sprintf("Unknown token value: %d", token))
+}
+
+// Parse parses the given source code and returns an AST. The given filename
+// is used to construct error messages and position information. The given
+// reader supplies the source code. The given handler is used to report errors
+// and warnings encountered while parsing. If any errors are reported, this
+// function returns a non-nil error.
+//
+// If the error returned is due to a syntax error in the source, then a non-nil
+// AST is also returned. If the handler chooses to not abort the parse (e.g. the
+// underlying error reporter returns nil instead of an error), the parser will
+// attempt to recover and keep going. This allows multiple syntax errors to be
+// reported in a single pass. It also means that more of the AST can be
+// populated (erroneous productions around the syntax error will of course be
+// absent).
+//
+// The degree to which the parser can recover from errors and populate the AST
+// depends on the nature of the syntax error and on whether there are any tokens
+// after the syntax error that can help the parser recover. This error recovery
+// and partial AST production is best effort.
+func Parse(filename string, r io.Reader, handler *reporter.Handler) (*ast.FileNode, error) {
+ lx, err := newLexer(r, filename, handler)
+ if err != nil {
+ return nil, err
+ }
+ protoParse(lx)
+ if lx.res == nil {
+ // nil AST means there was an error that prevented any parsing
+ // or the file was empty; synthesize empty non-nil AST
+ lx.res = ast.NewEmptyFileNode(filename)
+ }
+ return lx.res, handler.Error()
+}
+
+// Result is the result of constructing a descriptor proto from a parsed AST.
+// From this result, both the AST and the file descriptor proto can be obtained.
+// This also contains numerous lookup functions, for looking up AST nodes that
+// correspond to various elements of the descriptor hierarchy.
+//
+// Results can be created without AST information, using the ResultWithoutAST()
+// function. All functions other than AST() will still return non-nil values,
+// allowing compile operations to work with files that have only intermediate
+// descriptor protos and no source code. For such results, the functions that
+// return AST nodes will return placeholder nodes. The position information for
+// placeholder nodes contains only the filename.
+type Result interface {
+ // AST returns the parsed abstract syntax tree. This returns nil if the
+ // Result was created without an AST.
+ AST() *ast.FileNode
+ // FileDescriptorProto returns the file descriptor proto.
+ FileDescriptorProto() *descriptorpb.FileDescriptorProto
+
+ // FileNode returns the root of the AST. If this result has no AST then a
+ // placeholder node is returned.
+ FileNode() ast.FileDeclNode
+ // Node returns the AST node from which the given message was created. This
+ // can return nil, such as if the given message is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ Node(proto.Message) ast.Node
+ // OptionNode returns the AST node corresponding to the given uninterpreted
+ // option. This can return nil, such as if the given option is not part of
+ // the FileDescriptorProto hierarchy. If this result has no AST, this
+ // returns a placeholder node.
+ OptionNode(*descriptorpb.UninterpretedOption) ast.OptionDeclNode
+ // OptionNamePartNode returns the AST node corresponding to the given name
+ // part for an uninterpreted option. This can return nil, such as if the
+ // given name part is not part of the FileDescriptorProto hierarchy. If this
+ // result has no AST, this returns a placeholder node.
+ OptionNamePartNode(*descriptorpb.UninterpretedOption_NamePart) ast.Node
+ // MessageNode returns the AST node corresponding to the given message. This
+ // can return nil, such as if the given message is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ MessageNode(*descriptorpb.DescriptorProto) ast.MessageDeclNode
+ // FieldNode returns the AST node corresponding to the given field. This can
+ // return nil, such as if the given field is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ FieldNode(*descriptorpb.FieldDescriptorProto) ast.FieldDeclNode
+ // OneOfNode returns the AST node corresponding to the given oneof. This can
+ // return nil, such as if the given oneof is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ OneOfNode(*descriptorpb.OneofDescriptorProto) ast.Node
+ // ExtensionRangeNode returns the AST node corresponding to the given
+ // extension range. This can return nil, such as if the given range is not
+ // part of the FileDescriptorProto hierarchy. If this result has no AST,
+ // this returns a placeholder node.
+ ExtensionRangeNode(*descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode
+ // MessageReservedRangeNode returns the AST node corresponding to the given
+ // reserved range. This can return nil, such as if the given range is not
+ // part of the FileDescriptorProto hierarchy. If this result has no AST,
+ // this returns a placeholder node.
+ MessageReservedRangeNode(*descriptorpb.DescriptorProto_ReservedRange) ast.RangeDeclNode
+ // EnumNode returns the AST node corresponding to the given enum. This can
+ // return nil, such as if the given enum is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ EnumNode(*descriptorpb.EnumDescriptorProto) ast.Node
+ // EnumValueNode returns the AST node corresponding to the given enum value.
+ // This can return nil, such as if the given enum value is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ EnumValueNode(*descriptorpb.EnumValueDescriptorProto) ast.EnumValueDeclNode
+ // EnumReservedRangeNode returns the AST node corresponding to the given
+ // reserved range. This can return nil, such as if the given range is not
+ // part of the FileDescriptorProto hierarchy. If this result has no AST,
+ // this returns a placeholder node.
+ EnumReservedRangeNode(*descriptorpb.EnumDescriptorProto_EnumReservedRange) ast.RangeDeclNode
+ // ServiceNode returns the AST node corresponding to the given service. This
+ // can return nil, such as if the given service is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ ServiceNode(*descriptorpb.ServiceDescriptorProto) ast.Node
+ // MethodNode returns the AST node corresponding to the given method. This
+ // can return nil, such as if the given method is not part of the
+ // FileDescriptorProto hierarchy. If this result has no AST, this returns a
+ // placeholder node.
+ MethodNode(*descriptorpb.MethodDescriptorProto) ast.RPCDeclNode
+}
diff --git a/vendor/github.com/bufbuild/protocompile/parser/proto.y b/vendor/github.com/bufbuild/protocompile/parser/proto.y
new file mode 100644
index 00000000..78a6c806
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/parser/proto.y
@@ -0,0 +1,1219 @@
+%{
+package parser
+
+//lint:file-ignore SA4006 generated parser has unused values
+
+import (
+ "math"
+
+ "github.com/bufbuild/protocompile/ast"
+)
+
+%}
+
+// fields inside this union end up as the fields in a structure known
+// as ${PREFIX}SymType, of which a reference is passed to the lexer.
+%union{
+ file *ast.FileNode
+ syn *ast.SyntaxNode
+ fileDecl ast.FileElement
+ fileDecls []ast.FileElement
+ pkg *ast.PackageNode
+ imprt *ast.ImportNode
+ msg *ast.MessageNode
+ msgDecl ast.MessageElement
+ msgDecls []ast.MessageElement
+ fld *ast.FieldNode
+ mapFld *ast.MapFieldNode
+ mapType *ast.MapTypeNode
+ grp *ast.GroupNode
+ oo *ast.OneOfNode
+ ooDecl ast.OneOfElement
+ ooDecls []ast.OneOfElement
+ ext *ast.ExtensionRangeNode
+ resvd *ast.ReservedNode
+ en *ast.EnumNode
+ enDecl ast.EnumElement
+ enDecls []ast.EnumElement
+ env *ast.EnumValueNode
+ extend *ast.ExtendNode
+ extDecl ast.ExtendElement
+ extDecls []ast.ExtendElement
+ svc *ast.ServiceNode
+ svcDecl ast.ServiceElement
+ svcDecls []ast.ServiceElement
+ mtd *ast.RPCNode
+ rpcType *ast.RPCTypeNode
+ rpcDecl ast.RPCElement
+ rpcDecls []ast.RPCElement
+ opt *ast.OptionNode
+ opts *compactOptionList
+ ref *ast.FieldReferenceNode
+ optNms *fieldRefList
+ cmpctOpts *ast.CompactOptionsNode
+ rng *ast.RangeNode
+ rngs *rangeList
+ names *nameList
+ cid *identList
+ tid ast.IdentValueNode
+ sl *valueList
+ msgField *ast.MessageFieldNode
+ msgEntry *messageFieldEntry
+ msgLit *messageFieldList
+ v ast.ValueNode
+ il ast.IntValueNode
+ str *stringList
+ s *ast.StringLiteralNode
+ i *ast.UintLiteralNode
+ f *ast.FloatLiteralNode
+ id *ast.IdentNode
+ b *ast.RuneNode
+ err error
+}
+
+// any non-terminal which returns a value needs a type, which is
+// really a field name in the above union struct
+%type <file> file
+%type <syn> syntax
+%type <fileDecl> fileDecl
+%type <fileDecls> fileDecls
+%type <imprt> import
+%type <pkg> package
+%type <opt> option compactOption
+%type <opts> compactOptionDecls
+%type <rpcDecl> rpcDecl
+%type <rpcDecls> rpcDecls
+%type <ref> optionNameComponent aggName
+%type <optNms> optionName
+%type <cmpctOpts> compactOptions
+%type <v> constant scalarConstant aggregate msgLit numLit
+%type <il> intLit
+%type <id> name keyType msgElementName extElementName oneofElementName enumElementName
+%type <cid> ident msgElementIdent extElementIdent oneofElementIdent
+%type <tid> typeIdent msgElementTypeIdent extElementTypeIdent oneofElementTypeIdent
+%type <sl> constantList msgList
+%type <msgField> aggFieldEntry
+%type <msgEntry> aggField
+%type <msgLit> aggFields
+%type <fld> msgField oneofField extField
+%type <oo> oneof
+%type <grp> group oneofGroup
+%type <mapFld> mapField
+%type <mapType> mapType
+%type <msg> message
+%type <msgDecl> messageDecl
+%type <msgDecls> messageDecls
+%type <ooDecl> ooDecl
+%type <ooDecls> ooDecls
+%type <names> fieldNames
+%type <resvd> msgReserved enumReserved reservedNames
+%type <rng> tagRange enumRange
+%type <rngs> tagRanges enumRanges
+%type <ext> extensions
+%type <en> enum
+%type <enDecl> enumDecl
+%type <enDecls> enumDecls
+%type <env> enumValue
+%type <extend> extend
+%type <extDecl> extendDecl
+%type <extDecls> extendDecls
+%type <str> stringLit
+%type <svc> service
+%type <svcDecl> serviceDecl
+%type <svcDecls> serviceDecls
+%type <mtd> rpc
+%type <rpcType> rpcType
+
+// same for terminals
+%token <s> _STRING_LIT
+%token <i> _INT_LIT
+%token <f> _FLOAT_LIT
+%token <id> _NAME
+%token <id> _SYNTAX _IMPORT _WEAK _PUBLIC _PACKAGE _OPTION _TRUE _FALSE _INF _NAN _REPEATED _OPTIONAL _REQUIRED
+%token <id> _DOUBLE _FLOAT _INT32 _INT64 _UINT32 _UINT64 _SINT32 _SINT64 _FIXED32 _FIXED64 _SFIXED32 _SFIXED64
+%token <id> _BOOL _STRING _BYTES _GROUP _ONEOF _MAP _EXTENSIONS _TO _MAX _RESERVED _ENUM _MESSAGE _EXTEND
+%token <id> _SERVICE _RPC _STREAM _RETURNS
+%token <err> _ERROR
+// we define all of these, even ones that aren't used, to improve error messages
+// so it shows the unexpected symbol instead of showing "$unk"
+%token <b> '=' ';' ':' '{' '}' '\\' '/' '?' '.' ',' '>' '<' '+' '-' '(' ')' '[' ']' '*' '&' '^' '%' '$' '#' '@' '!' '~' '`'
+
+%%
+
+file : syntax {
+ lex := protolex.(*protoLex)
+ $$ = ast.NewFileNode(lex.info, $1, nil, lex.eof)
+ lex.res = $$
+ }
+ | fileDecls {
+ lex := protolex.(*protoLex)
+ $$ = ast.NewFileNode(lex.info, nil, $1, lex.eof)
+ lex.res = $$
+ }
+ | syntax fileDecls {
+ lex := protolex.(*protoLex)
+ $$ = ast.NewFileNode(lex.info, $1, $2, lex.eof)
+ lex.res = $$
+ }
+ | {
+ lex := protolex.(*protoLex)
+ $$ = ast.NewFileNode(lex.info, nil, nil, lex.eof)
+ lex.res = $$
+ }
+
+fileDecls : fileDecls fileDecl {
+ if $2 != nil {
+ $$ = append($1, $2)
+ } else {
+ $$ = $1
+ }
+ }
+ | fileDecl {
+ if $1 != nil {
+ $$ = []ast.FileElement{$1}
+ } else {
+ $$ = nil
+ }
+ }
+
+fileDecl : import {
+ $$ = $1
+ }
+ | package {
+ $$ = $1
+ }
+ | option {
+ $$ = $1
+ }
+ | message {
+ $$ = $1
+ }
+ | enum {
+ $$ = $1
+ }
+ | extend {
+ $$ = $1
+ }
+ | service {
+ $$ = $1
+ }
+ | ';' {
+ $$ = ast.NewEmptyDeclNode($1)
+ }
+ | error ';' {
+ $$ = nil
+ }
+ | error {
+ $$ = nil
+ }
+
+syntax : _SYNTAX '=' stringLit ';' {
+ $$ = ast.NewSyntaxNode($1.ToKeyword(), $2, $3.toStringValueNode(), $4)
+ }
+
+import : _IMPORT stringLit ';' {
+ $$ = ast.NewImportNode($1.ToKeyword(), nil, nil, $2.toStringValueNode(), $3)
+ }
+ | _IMPORT _WEAK stringLit ';' {
+ $$ = ast.NewImportNode($1.ToKeyword(), nil, $2.ToKeyword(), $3.toStringValueNode(), $4)
+ }
+ | _IMPORT _PUBLIC stringLit ';' {
+ $$ = ast.NewImportNode($1.ToKeyword(), $2.ToKeyword(), nil, $3.toStringValueNode(), $4)
+ }
+
+package : _PACKAGE ident ';' {
+ $$ = ast.NewPackageNode($1.ToKeyword(), $2.toIdentValueNode(nil), $3)
+ }
+
+ident : name {
+ $$ = &identList{$1, nil, nil}
+ }
+ | name '.' ident {
+ $$ = &identList{$1, $2, $3}
+ }
+
+// to mimic limitations of protoc recursive-descent parser,
+// we don't allow message statement keywords as identifiers
+// (or oneof statement keywords [e.g. "option"] below)
+
+msgElementIdent : msgElementName {
+ $$ = &identList{$1, nil, nil}
+ }
+ | msgElementName '.' ident {
+ $$ = &identList{$1, $2, $3}
+ }
+
+extElementIdent : extElementName {
+ $$ = &identList{$1, nil, nil}
+ }
+ | extElementName '.' ident {
+ $$ = &identList{$1, $2, $3}
+ }
+
+oneofElementIdent : oneofElementName {
+ $$ = &identList{$1, nil, nil}
+ }
+ | oneofElementName '.' ident {
+ $$ = &identList{$1, $2, $3}
+ }
+
+option : _OPTION optionName '=' constant ';' {
+ refs, dots := $2.toNodes()
+ optName := ast.NewOptionNameNode(refs, dots)
+ $$ = ast.NewOptionNode($1.ToKeyword(), optName, $3, $4, $5)
+ }
+
+optionName : optionNameComponent {
+ $$ = &fieldRefList{$1, nil, nil}
+ }
+ | optionNameComponent '.'
optionName { + $$ = &fieldRefList{$1, $2, $3} + } + +optionNameComponent : name { + $$ = ast.NewFieldReferenceNode($1) + } + | '(' typeIdent ')' { + $$ = ast.NewExtensionFieldReferenceNode($1, $2, $3) + } + +constant : scalarConstant + | aggregate + +scalarConstant : stringLit { + $$ = $1.toStringValueNode() + } + | numLit + | name { + $$ = $1 + } + +numLit : _FLOAT_LIT { + $$ = $1 + } + | '-' _FLOAT_LIT { + $$ = ast.NewSignedFloatLiteralNode($1, $2) + } + | '+' _FLOAT_LIT { + $$ = ast.NewSignedFloatLiteralNode($1, $2) + } + | '+' _INF { + f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) + $$ = ast.NewSignedFloatLiteralNode($1, f) + } + | '-' _INF { + f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) + $$ = ast.NewSignedFloatLiteralNode($1, f) + } + | _INT_LIT { + $$ = $1 + } + | '+' _INT_LIT { + $$ = ast.NewPositiveUintLiteralNode($1, $2) + } + | '-' _INT_LIT { + if $2.Val > math.MaxInt64 + 1 { + // can't represent as int so treat as float literal + $$ = ast.NewSignedFloatLiteralNode($1, $2) + } else { + $$ = ast.NewNegativeIntLiteralNode($1, $2) + } + } + +stringLit : _STRING_LIT { + $$ = &stringList{$1, nil} + } + | _STRING_LIT stringLit { + $$ = &stringList{$1, $2} + } + +aggregate : '{' aggFields '}' { + fields, delims := $2.toNodes() + $$ = ast.NewMessageLiteralNode($1, fields, delims, $3) + } + | '{' error '}' { + $$ = nil + } + +aggFields : aggField { + if $1 != nil { + $$ = &messageFieldList{$1, nil} + } else { + $$ = nil + } + } + | aggField aggFields { + if $1 != nil { + $$ = &messageFieldList{$1, $2} + } else { + $$ = $2 + } + } + | { + $$ = nil + } + +aggField : aggFieldEntry { + if $1 != nil { + $$ = &messageFieldEntry{$1, nil} + } else { + $$ = nil + } + } + | aggFieldEntry ',' { + if $1 != nil { + $$ = &messageFieldEntry{$1, $2} + } else { + $$ = nil + } + } + | aggFieldEntry ';' { + if $1 != nil { + $$ = &messageFieldEntry{$1, $2} + } else { + $$ = nil + } + } + | error ',' { + $$ = nil + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +aggFieldEntry : aggName ':' scalarConstant { + if $1 != nil { + $$ = ast.NewMessageFieldNode($1, $2, $3) + } else { + $$ = nil + } + } + | aggName '[' ']' { + if $1 != nil { + val := ast.NewArrayLiteralNode($2, nil, nil, $3) + $$ = ast.NewMessageFieldNode($1, nil, val) + } else { + $$ = nil + } + } + | aggName ':' '[' ']' { + if $1 != nil { + val := ast.NewArrayLiteralNode($3, nil, nil, $4) + $$ = ast.NewMessageFieldNode($1, $2, val) + } else { + $$ = nil + } + } + | aggName '[' msgList ']' { + if $1 != nil { + vals, commas := $3.toNodes() + val := ast.NewArrayLiteralNode($2, vals, commas, $4) + $$ = ast.NewMessageFieldNode($1, nil, val) + } else { + $$ = nil + } + } + | aggName ':' '[' constantList ']' { + if $1 != nil { + vals, commas := $4.toNodes() + val := ast.NewArrayLiteralNode($3, vals, commas, $5) + $$ = ast.NewMessageFieldNode($1, $2, val) + } else { + $$ = nil + } + } + | aggName ':' '[' error ']' { + $$ = nil + } + | aggName '[' error ']' { + $$ = nil + } + | aggName ':' msgLit { + if $1 != nil && $3 != nil { + $$ = ast.NewMessageFieldNode($1, $2, $3) + } else { + $$ = nil + } + } + | aggName msgLit { + if $1 != nil && $2 != nil { + $$ = ast.NewMessageFieldNode($1, nil, $2) + } else { + $$ = nil + } + } + | aggName ':' '<' error '>' { + $$ = nil + } + | aggName '<' error '>' { + $$ = nil + } + +aggName : name { + $$ = ast.NewFieldReferenceNode($1) + } + | '[' ident ']' { + $$ = ast.NewExtensionFieldReferenceNode($1, $2.toIdentValueNode(nil), $3) + } + | '[' ident '/' ident ']' { + $$ = 
ast.NewAnyTypeReferenceNode($1, $2.toIdentValueNode(nil), $3, $4.toIdentValueNode(nil), $5) + } + | '[' error ']' { + $$ = nil + } + +msgList : msgLit { + if $1 == nil { + $$ = nil + } else { + $$ = &valueList{$1, nil, nil} + } + } + | msgLit ',' msgList { + if $1 == nil { + $$ = nil + } else { + $$ = &valueList{$1, $2, $3} + } + } + +msgLit : aggregate { + $$ = $1 + } + | '<' aggFields '>' { + fields, delims := $2.toNodes() + $$ = ast.NewMessageLiteralNode($1, fields, delims, $3) + } + | '<' error '>' { + $$ = nil + } + +constantList : constant { + $$ = &valueList{$1, nil, nil} + } + | constant ',' constantList { + $$ = &valueList{$1, $2, $3} + } + | '<' aggFields '>' { + fields, delims := $2.toNodes() + msg := ast.NewMessageLiteralNode($1, fields, delims, $3) + $$ = &valueList{msg, nil, nil} + } + | '<' aggFields '>' ',' constantList { + fields, delims := $2.toNodes() + msg := ast.NewMessageLiteralNode($1, fields, delims, $3) + $$ = &valueList{msg, $4, $5} + } + | '<' error '>' { + $$ = nil + } + | '<' error '>' ',' constantList { + $$ = $5 + } + +typeIdent : ident { + $$ = $1.toIdentValueNode(nil) + } + | '.' ident { + $$ = $2.toIdentValueNode($1) + } + +msgElementTypeIdent : msgElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' ident { + $$ = $2.toIdentValueNode($1) + } + +extElementTypeIdent : extElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' ident { + $$ = $2.toIdentValueNode($1) + } + +oneofElementTypeIdent : oneofElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' ident { + $$ = $2.toIdentValueNode($1) + } + +msgField : _REQUIRED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) + } + | _OPTIONAL typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) + } + | _REPEATED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) + } + | _REQUIRED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | _OPTIONAL typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | _REPEATED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | msgElementTypeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) + } + | msgElementTypeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) + } + +extField : _REQUIRED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) + } + | _OPTIONAL typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) + } + | _REPEATED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) + } + | _REQUIRED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | _OPTIONAL typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | _REPEATED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | extElementTypeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) + } + | extElementTypeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) + } + +compactOptions: '[' 
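+	// bracketed options attached to a single field declaration,
+	// e.g. `int32 old_id = 6 [deprecated = true];` (illustrative)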
compactOptionDecls ']' { + opts, commas := $2.toNodes() + $$ = ast.NewCompactOptionsNode($1, opts, commas, $3) + } + +compactOptionDecls : compactOption { + $$ = &compactOptionList{$1, nil, nil} + } + | compactOption ',' compactOptionDecls { + $$ = &compactOptionList{$1, $2, $3} + } + +compactOption: optionName '=' constant { + refs, dots := $1.toNodes() + optName := ast.NewOptionNameNode(refs, dots) + $$ = ast.NewCompactOptionNode(optName, $2, $3) + } + +group : _REQUIRED _GROUP name '=' _INT_LIT '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8) + } + | _OPTIONAL _GROUP name '=' _INT_LIT '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8) + } + | _REPEATED _GROUP name '=' _INT_LIT '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8) + } + | _REQUIRED _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) + } + | _OPTIONAL _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) + } + | _REPEATED _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) + } + +oneof : _ONEOF name '{' ooDecls '}' { + $$ = ast.NewOneOfNode($1.ToKeyword(), $2, $3, $4, $5) + } + +ooDecls : ooDecls ooDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | ooDecl { + if $1 != nil { + $$ = []ast.OneOfElement{$1} + } else { + $$ = nil + } + } + | { + $$ = nil + } + +ooDecl : option { + $$ = $1 + } + | oneofField { + $$ = $1 + } + | oneofGroup { + $$ = $1 + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +oneofField : oneofElementTypeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) + } + | oneofElementTypeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) + } + +oneofGroup : _GROUP name '=' _INT_LIT '{' messageDecls '}' { + $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, $3, $4, nil, $5, $6, $7) + } + | _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { + $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, $3, $4, $5, $6, $7, $8) + } + +mapField : mapType name '=' _INT_LIT ';' { + $$ = ast.NewMapFieldNode($1, $2, $3, $4, nil, $5) + } + | mapType name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewMapFieldNode($1, $2, $3, $4, $5, $6) + } + +mapType : _MAP '<' keyType ',' typeIdent '>' { + $$ = ast.NewMapTypeNode($1.ToKeyword(), $2, $3, $4, $5, $6) + } + +keyType : _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + +extensions : _EXTENSIONS tagRanges ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewExtensionRangeNode($1.ToKeyword(), ranges, commas, nil, $3) + } + | _EXTENSIONS tagRanges compactOptions ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewExtensionRangeNode($1.ToKeyword(), ranges, commas, $3, $4) + } + +tagRanges : tagRange { + $$ = &rangeList{$1, nil, nil} + } + | tagRange ',' tagRanges { + $$ = &rangeList{$1, $2, $3} + } + +tagRange : _INT_LIT { + $$ = ast.NewRangeNode($1, nil, nil, nil) + } + | _INT_LIT _TO _INT_LIT { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), $3, nil) + } + | _INT_LIT _TO _MAX { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), nil, 
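+			// "max" is kept as a keyword node rather than a literal,
+			// as in `extensions 100 to max;` (illustrative)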
$3.ToKeyword()) + } + +enumRanges : enumRange { + $$ = &rangeList{$1, nil, nil} + } + | enumRange ',' enumRanges { + $$ = &rangeList{$1, $2, $3} + } + +enumRange : intLit { + $$ = ast.NewRangeNode($1, nil, nil, nil) + } + | intLit _TO intLit { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), $3, nil) + } + | intLit _TO _MAX { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), nil, $3.ToKeyword()) + } + +intLit : _INT_LIT { + $$ = $1 + } + | '-' _INT_LIT { + $$ = ast.NewNegativeIntLiteralNode($1, $2) + } + +msgReserved : _RESERVED tagRanges ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewReservedRangesNode($1.ToKeyword(), ranges, commas, $3) + } + | reservedNames + +enumReserved : _RESERVED enumRanges ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewReservedRangesNode($1.ToKeyword(), ranges, commas, $3) + } + | reservedNames + +reservedNames : _RESERVED fieldNames ';' { + names, commas := $2.toNodes() + $$ = ast.NewReservedNamesNode($1.ToKeyword(), names, commas, $3) + } + +fieldNames : stringLit { + $$ = &nameList{$1.toStringValueNode(), nil, nil} + } + | stringLit ',' fieldNames { + $$ = &nameList{$1.toStringValueNode(), $2, $3} + } + +enum : _ENUM name '{' enumDecls '}' { + $$ = ast.NewEnumNode($1.ToKeyword(), $2, $3, $4, $5) + } + +enumDecls : enumDecls enumDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | enumDecl { + if $1 != nil { + $$ = []ast.EnumElement{$1} + } else { + $$ = nil + } + } + | { + $$ = nil + } + +enumDecl : option { + $$ = $1 + } + | enumValue { + $$ = $1 + } + | enumReserved { + $$ = $1 + } + | ';' { + $$ = ast.NewEmptyDeclNode($1) + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +enumValue : enumElementName '=' intLit ';' { + $$ = ast.NewEnumValueNode($1, $2, $3, nil, $4) + } + | enumElementName '=' intLit compactOptions ';' { + $$ = ast.NewEnumValueNode($1, $2, $3, $4, $5) + } + +message : _MESSAGE name '{' messageDecls '}' { + $$ = ast.NewMessageNode($1.ToKeyword(), $2, $3, $4, $5) + } + +messageDecls : messageDecls messageDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | messageDecl { + if $1 != nil { + $$ = []ast.MessageElement{$1} + } else { + $$ = nil + } + } + | { + $$ = nil + } + +messageDecl : msgField { + $$ = $1 + } + | enum { + $$ = $1 + } + | message { + $$ = $1 + } + | extend { + $$ = $1 + } + | extensions { + $$ = $1 + } + | group { + $$ = $1 + } + | option { + $$ = $1 + } + | oneof { + $$ = $1 + } + | mapField { + $$ = $1 + } + | msgReserved { + $$ = $1 + } + | ';' { + $$ = ast.NewEmptyDeclNode($1) + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +extend : _EXTEND typeIdent '{' extendDecls '}' { + $$ = ast.NewExtendNode($1.ToKeyword(), $2, $3, $4, $5) + } + +extendDecls : extendDecls extendDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | extendDecl { + if $1 != nil { + $$ = []ast.ExtendElement{$1} + } else { + $$ = nil + } + } + | { + $$ = nil + } + +extendDecl : extField { + $$ = $1 + } + | group { + $$ = $1 + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +service : _SERVICE name '{' serviceDecls '}' { + $$ = ast.NewServiceNode($1.ToKeyword(), $2, $3, $4, $5) + } + +serviceDecls : serviceDecls serviceDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | serviceDecl { + if $1 != nil { + $$ = []ast.ServiceElement{$1} + } else { + $$ = nil + } + } + | { + $$ = nil + } + +// NB: doc suggests support for "stream" declaration, separate from "rpc", but +// it does not appear to be supported in protoc 
(doc is likely from grammar for +// Google-internal version of protoc, with support for streaming stubby) +serviceDecl : option { + $$ = $1 + } + | rpc { + $$ = $1 + } + | ';' { + $$ = ast.NewEmptyDeclNode($1) + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +rpc : _RPC name rpcType _RETURNS rpcType ';' { + $$ = ast.NewRPCNode($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, $6) + } + | _RPC name rpcType _RETURNS rpcType '{' rpcDecls '}' { + $$ = ast.NewRPCNodeWithBody($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, $6, $7, $8) + } + +rpcType : '(' _STREAM typeIdent ')' { + $$ = ast.NewRPCTypeNode($1, $2.ToKeyword(), $3, $4) + } + | '(' typeIdent ')' { + $$ = ast.NewRPCTypeNode($1, nil, $2, $3) + } + +rpcDecls : rpcDecls rpcDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | rpcDecl { + if $1 != nil { + $$ = []ast.RPCElement{$1} + } else { + $$ = nil + } + } + | { + $$ = nil + } + +rpcDecl : option { + $$ = $1 + } + | ';' { + $$ = ast.NewEmptyDeclNode($1) + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +// excludes message, enum, oneof, extensions, reserved, extend, +// option, optional, required, and repeated +msgElementName : _NAME + | _SYNTAX + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _TRUE + | _FALSE + | _INF + | _NAN + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _MAP + | _TO + | _MAX + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes optional, required, and repeated +extElementName : _NAME + | _SYNTAX + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _OPTION + | _TRUE + | _FALSE + | _INF + | _NAN + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes reserved, option +enumElementName : _NAME + | _SYNTAX + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _TRUE + | _FALSE + | _INF + | _NAN + | _REPEATED + | _OPTIONAL + | _REQUIRED + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes option, optional, required, and repeated +oneofElementName : _NAME + | _SYNTAX + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _TRUE + | _FALSE + | _INF + | _NAN + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +name : _NAME + | _SYNTAX + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _OPTION + | _TRUE + | _FALSE + | _INF + | _NAN + | _REPEATED + | _OPTIONAL + | _REQUIRED + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED 
+ | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +%% diff --git a/vendor/github.com/bufbuild/protocompile/parser/proto.y.go b/vendor/github.com/bufbuild/protocompile/parser/proto.y.go new file mode 100644 index 00000000..5a7e953f --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/proto.y.go @@ -0,0 +1,2408 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by goyacc -o proto.y.go -l -p proto proto.y. DO NOT EDIT. +package parser + +import __yyfmt__ "fmt" + +//lint:file-ignore SA4006 generated parser has unused values + +import ( + "math" + + "github.com/bufbuild/protocompile/ast" +) + +type protoSymType struct { + yys int + file *ast.FileNode + syn *ast.SyntaxNode + fileDecl ast.FileElement + fileDecls []ast.FileElement + pkg *ast.PackageNode + imprt *ast.ImportNode + msg *ast.MessageNode + msgDecl ast.MessageElement + msgDecls []ast.MessageElement + fld *ast.FieldNode + mapFld *ast.MapFieldNode + mapType *ast.MapTypeNode + grp *ast.GroupNode + oo *ast.OneOfNode + ooDecl ast.OneOfElement + ooDecls []ast.OneOfElement + ext *ast.ExtensionRangeNode + resvd *ast.ReservedNode + en *ast.EnumNode + enDecl ast.EnumElement + enDecls []ast.EnumElement + env *ast.EnumValueNode + extend *ast.ExtendNode + extDecl ast.ExtendElement + extDecls []ast.ExtendElement + svc *ast.ServiceNode + svcDecl ast.ServiceElement + svcDecls []ast.ServiceElement + mtd *ast.RPCNode + rpcType *ast.RPCTypeNode + rpcDecl ast.RPCElement + rpcDecls []ast.RPCElement + opt *ast.OptionNode + opts *compactOptionList + ref *ast.FieldReferenceNode + optNms *fieldRefList + cmpctOpts *ast.CompactOptionsNode + rng *ast.RangeNode + rngs *rangeList + names *nameList + cid *identList + tid ast.IdentValueNode + sl *valueList + msgField *ast.MessageFieldNode + msgEntry *messageFieldEntry + msgLit *messageFieldList + v ast.ValueNode + il ast.IntValueNode + str *stringList + s *ast.StringLiteralNode + i *ast.UintLiteralNode + f *ast.FloatLiteralNode + id *ast.IdentNode + b *ast.RuneNode + err error +} + +const _STRING_LIT = 57346 +const _INT_LIT = 57347 +const _FLOAT_LIT = 57348 +const _NAME = 57349 +const _SYNTAX = 57350 +const _IMPORT = 57351 +const _WEAK = 57352 +const _PUBLIC = 57353 +const _PACKAGE = 57354 +const _OPTION = 57355 +const _TRUE = 57356 +const _FALSE = 57357 +const _INF = 57358 +const _NAN = 57359 +const _REPEATED = 57360 +const _OPTIONAL = 57361 +const _REQUIRED = 57362 +const _DOUBLE = 57363 +const _FLOAT = 57364 +const _INT32 = 57365 +const _INT64 = 57366 +const _UINT32 = 57367 +const _UINT64 = 57368 +const _SINT32 = 57369 +const _SINT64 = 57370 +const _FIXED32 = 57371 +const _FIXED64 = 57372 +const _SFIXED32 = 57373 +const _SFIXED64 = 57374 +const _BOOL = 57375 +const _STRING = 57376 +const _BYTES = 57377 +const _GROUP = 57378 +const _ONEOF = 57379 +const _MAP = 57380 +const _EXTENSIONS = 57381 +const _TO = 57382 +const _MAX = 57383 +const _RESERVED = 57384 +const _ENUM = 57385 +const _MESSAGE = 
57386 +const _EXTEND = 57387 +const _SERVICE = 57388 +const _RPC = 57389 +const _STREAM = 57390 +const _RETURNS = 57391 +const _ERROR = 57392 + +var protoToknames = [...]string{ + "$end", + "error", + "$unk", + "_STRING_LIT", + "_INT_LIT", + "_FLOAT_LIT", + "_NAME", + "_SYNTAX", + "_IMPORT", + "_WEAK", + "_PUBLIC", + "_PACKAGE", + "_OPTION", + "_TRUE", + "_FALSE", + "_INF", + "_NAN", + "_REPEATED", + "_OPTIONAL", + "_REQUIRED", + "_DOUBLE", + "_FLOAT", + "_INT32", + "_INT64", + "_UINT32", + "_UINT64", + "_SINT32", + "_SINT64", + "_FIXED32", + "_FIXED64", + "_SFIXED32", + "_SFIXED64", + "_BOOL", + "_STRING", + "_BYTES", + "_GROUP", + "_ONEOF", + "_MAP", + "_EXTENSIONS", + "_TO", + "_MAX", + "_RESERVED", + "_ENUM", + "_MESSAGE", + "_EXTEND", + "_SERVICE", + "_RPC", + "_STREAM", + "_RETURNS", + "_ERROR", + "'='", + "';'", + "':'", + "'{'", + "'}'", + "'\\\\'", + "'/'", + "'?'", + "'.'", + "','", + "'>'", + "'<'", + "'+'", + "'-'", + "'('", + "')'", + "'['", + "']'", + "'*'", + "'&'", + "'^'", + "'%'", + "'$'", + "'#'", + "'@'", + "'!'", + "'~'", + "'`'", +} + +var protoStatenames = [...]string{} + +const protoEofCode = 1 +const protoErrCode = 2 +const protoInitialStackSize = 16 + +var protoExca = [...]int16{ + -1, 0, + 1, 4, + -2, 0, + -1, 1, + 1, -1, + -2, 0, + -1, 2, + 1, 1, + -2, 0, + -1, 3, + 1, 2, + -2, 0, + -1, 22, + 1, 3, + -2, 0, + -1, 95, + 55, 185, + -2, 0, + -1, 96, + 55, 173, + -2, 0, + -1, 97, + 55, 202, + -2, 0, + -1, 99, + 55, 210, + -2, 0, + -1, 110, + 55, 54, + -2, 0, + -1, 289, + 55, 52, + 61, 52, + -2, 0, + -1, 354, + 61, 54, + -2, 0, + -1, 370, + 55, 124, + -2, 0, + -1, 404, + 61, 54, + -2, 0, + -1, 409, + 61, 54, + -2, 0, + -1, 497, + 61, 54, + -2, 0, + -1, 547, + 55, 185, + -2, 0, + -1, 551, + 55, 185, + -2, 0, + -1, 555, + 55, 185, + -2, 0, + -1, 573, + 55, 222, + -2, 0, + -1, 580, + 55, 185, + -2, 0, + -1, 583, + 55, 185, + -2, 0, + -1, 586, + 55, 185, + -2, 0, + -1, 607, + 55, 185, + -2, 0, + -1, 619, + 55, 185, + -2, 0, +} + +const protoPrivate = 57344 + +const protoLast = 2396 + +var protoAct = [...]int16{ + 31, 118, 117, 125, 8, 106, 8, 8, 496, 494, + 593, 423, 366, 406, 399, 430, 81, 325, 77, 79, + 80, 82, 84, 327, 316, 107, 8, 310, 411, 408, + 105, 280, 124, 228, 139, 177, 407, 413, 30, 425, + 544, 26, 351, 110, 605, 607, 569, 567, 412, 565, + 85, 354, 555, 87, 88, 89, 352, 75, 367, 367, + 553, 367, 367, 551, 367, 367, 549, 547, 545, 537, + 536, 531, 524, 514, 512, 367, 367, 481, 500, 499, + 367, 367, 365, 367, 574, 414, 367, 367, 110, 367, + 535, 400, 367, 109, 77, 94, 409, 367, 328, 116, + 178, 104, 405, 281, 110, 98, 328, 346, 319, 346, + 576, 292, 409, 103, 346, 345, 577, 345, 543, 301, + 346, 564, 345, 541, 346, 504, 598, 344, 345, 503, + 230, 185, 345, 115, 485, 309, 346, 313, 314, 287, + 597, 538, 515, 350, 345, 501, 480, 391, 303, 305, + 307, 349, 374, 29, 328, 368, 523, 329, 338, 320, + 343, 93, 434, 318, 317, 329, 315, 440, 441, 442, + 443, 444, 445, 17, 446, 447, 448, 449, 91, 619, + 178, 450, 451, 452, 453, 454, 455, 456, 457, 458, + 459, 460, 461, 462, 463, 464, 436, 465, 466, 467, + 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, + 317, 185, 322, 329, 14, 517, 572, 586, 573, 438, + 4, 15, 284, 583, 16, 17, 580, 14, 596, 284, + 596, 370, 99, 17, 15, 97, 336, 16, 17, 17, + 17, 17, 96, 95, 617, 611, 591, 590, 589, 333, + 334, 335, 584, 581, 578, 19, 18, 20, 21, 337, + 230, 331, 571, 563, 13, 557, 527, 285, 19, 18, + 20, 21, 283, 519, 285, 339, 422, 13, 595, 283, + 595, 609, 390, 281, 373, 372, 342, 341, 332, 323, + 292, 302, 286, 102, 101, 100, 90, 
355, 86, 25, + 561, 560, 516, 488, 358, 359, 360, 361, 362, 363, + 487, 340, 486, 420, 419, 356, 418, 417, 347, 416, + 415, 353, 397, 371, 364, 324, 92, 24, 490, 427, + 392, 369, 29, 5, 394, 395, 396, 23, 27, 28, + 122, 11, 388, 11, 11, 588, 120, 10, 389, 10, + 10, 587, 109, 299, 297, 292, 23, 355, 355, 530, + 398, 29, 312, 11, 298, 428, 296, 294, 77, 10, + 529, 528, 121, 9, 431, 9, 9, 295, 511, 510, + 509, 403, 401, 376, 377, 378, 379, 380, 381, 382, + 383, 384, 385, 386, 387, 9, 426, 508, 507, 479, + 317, 506, 482, 109, 489, 292, 478, 312, 421, 483, + 292, 393, 29, 282, 279, 3, 484, 492, 22, 12, + 227, 179, 176, 123, 326, 311, 180, 128, 429, 137, + 127, 433, 126, 431, 513, 505, 520, 521, 229, 432, + 119, 289, 434, 290, 435, 518, 235, 440, 441, 442, + 443, 444, 445, 17, 446, 447, 448, 449, 134, 437, + 522, 450, 451, 452, 453, 454, 455, 456, 457, 458, + 459, 460, 461, 462, 463, 464, 436, 465, 466, 467, + 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, + 236, 525, 140, 183, 439, 238, 143, 526, 292, 438, + 375, 108, 532, 291, 76, 533, 592, 355, 534, 424, + 7, 6, 2, 1, 0, 542, 77, 109, 0, 546, + 548, 550, 552, 554, 556, 559, 539, 558, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, + 0, 566, 568, 570, 0, 562, 0, 0, 575, 0, + 579, 0, 0, 0, 582, 0, 0, 0, 585, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 594, 0, 0, + 0, 301, 0, 600, 301, 0, 602, 301, 0, 604, + 0, 0, 0, 0, 0, 0, 594, 0, 109, 109, + 606, 608, 301, 610, 301, 0, 301, 612, 613, 0, + 618, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 301, 0, 621, 301, 495, 0, 29, 114, 111, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 0, 0, 0, 0, 110, 0, 0, 0, + 0, 0, 0, 0, 497, 113, 112, 0, 0, 0, + 493, 29, 114, 111, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, + 0, 110, 0, 0, 0, 0, 0, 0, 0, 404, + 113, 112, 0, 0, 402, 29, 114, 111, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 0, 0, 0, 0, 110, 0, 0, 0, 0, + 0, 0, 0, 497, 113, 112, 29, 114, 111, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 0, 0, 0, 0, 110, 0, 0, 0, + 0, 0, 540, 0, 0, 113, 112, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 502, 0, 0, 0, 293, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 498, 0, 0, 0, 293, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 410, 0, 0, 0, 293, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 348, 0, 0, 0, + 293, 32, 33, 34, 35, 36, 37, 
38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 288, 0, 0, + 0, 293, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 293, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 130, 0, 0, + 0, 78, 144, 145, 146, 147, 148, 149, 17, 150, + 151, 152, 153, 133, 132, 131, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 136, 142, 135, 170, 171, 138, 19, 18, + 20, 172, 173, 174, 175, 0, 0, 129, 0, 0, + 622, 130, 0, 0, 141, 0, 144, 145, 146, 147, + 148, 149, 17, 150, 151, 152, 153, 133, 132, 131, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 136, 142, 135, 170, + 171, 138, 19, 18, 20, 172, 173, 174, 175, 0, + 0, 129, 0, 0, 620, 130, 0, 0, 141, 0, + 144, 145, 146, 147, 148, 149, 17, 150, 151, 152, + 153, 133, 132, 131, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 136, 142, 135, 170, 171, 138, 19, 18, 20, 172, + 173, 174, 175, 0, 0, 129, 0, 0, 616, 130, + 0, 0, 141, 0, 144, 145, 146, 147, 148, 149, + 17, 150, 151, 152, 153, 133, 132, 131, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 136, 142, 135, 170, 171, 138, + 19, 18, 20, 172, 173, 174, 175, 0, 0, 129, + 0, 0, 615, 130, 0, 0, 141, 0, 144, 145, + 146, 147, 148, 149, 17, 150, 151, 152, 153, 133, + 132, 131, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 136, 142, + 135, 170, 171, 138, 19, 18, 20, 172, 173, 174, + 175, 0, 0, 129, 0, 0, 614, 130, 0, 0, + 141, 0, 144, 145, 146, 147, 148, 149, 17, 150, + 151, 152, 153, 133, 132, 131, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 136, 142, 135, 170, 171, 138, 19, 18, + 20, 172, 173, 174, 175, 0, 0, 129, 0, 0, + 603, 130, 0, 0, 141, 0, 144, 145, 146, 147, + 148, 149, 17, 150, 151, 152, 153, 133, 132, 131, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 136, 142, 135, 170, + 171, 138, 19, 18, 20, 172, 173, 174, 175, 0, + 0, 129, 0, 0, 601, 130, 0, 0, 141, 0, + 144, 145, 146, 147, 148, 149, 17, 150, 151, 152, + 153, 133, 132, 131, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 136, 142, 135, 170, 171, 138, 19, 18, 20, 172, + 173, 174, 175, 0, 0, 129, 0, 0, 599, 130, + 0, 0, 141, 0, 144, 145, 146, 147, 148, 149, + 17, 150, 151, 152, 153, 133, 132, 131, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 136, 142, 135, 170, 171, 138, + 19, 18, 20, 172, 173, 174, 175, 0, 0, 129, + 0, 0, 300, 130, 0, 0, 141, 0, 144, 145, + 146, 147, 148, 149, 17, 150, 151, 152, 153, 133, + 132, 131, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 136, 142, + 135, 170, 171, 138, 19, 18, 20, 172, 173, 174, + 175, 0, 0, 129, 0, 0, 231, 0, 0, 0, + 141, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 234, 233, 232, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 0, 0, 0, 0, 0, 
330, + 231, 0, 0, 237, 0, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 234, 233, 232, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 83, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 491, 74, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 83, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 308, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 83, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 306, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 83, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 304, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 0, 0, 0, 0, 0, 182, 0, + 0, 0, 83, 186, 187, 188, 189, 190, 191, 17, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 184, 220, + 221, 222, 223, 224, 225, 226, 0, 182, 181, 0, + 0, 321, 186, 187, 188, 189, 190, 191, 17, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 184, 220, 221, + 222, 223, 224, 225, 226, 357, 0, 181, 0, 0, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, +} + +var protoPact = [...]int16{ + 212, -1000, 225, 225, 276, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 247, 328, 2346, 1236, 2346, 2346, + 1941, 2346, 225, -1000, 408, -1000, 246, 408, 408, 408, + 244, 119, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 275, 102, -1000, 1941, 189, + 188, 181, -1000, 2346, 178, 243, -1000, 242, 241, -1000, + -1000, 2346, 812, 1236, 33, 1781, 2255, 1888, -1000, 227, + -1000, -1000, -1000, -1000, 240, -1000, -1000, -1000, -1000, -1000, + 1175, -1000, 361, 348, -1000, -1000, -1000, 1727, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 239, 2153, 2100, 2047, 2346, 402, 2346, 2346, 357, -1000, + -1000, 2346, 46, 100, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 2206, -1000, -1000, -1000, + -1000, -1000, 237, 274, 149, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1834, -1000, -1000, + -1000, 236, 2153, 2100, 2047, 2346, -1000, 2346, 99, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 220, + -1000, -1000, -1000, -1000, 235, 2346, -1000, 105, 72, 1114, + 91, -11, -1000, 2303, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 2346, 2346, 2346, 2346, 2346, 2346, 273, + 30, 95, 291, 177, 272, 233, 232, 92, -1000, 360, + 2346, -1000, -1000, -1000, 101, 230, 87, 290, -1000, 406, + -1000, -1000, -1000, 2346, 2346, 2346, 271, -1000, 2346, -1000, + -1000, -1000, 26, -1000, -1000, -1000, -1000, -1000, 84, -1000, + -1000, 687, 34, -1000, 1053, -1000, -20, 17, 269, 268, + 266, 265, 263, 262, 403, -1000, 224, 1236, 402, 324, + 440, 401, -1000, -1000, 408, 86, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 25, + -1000, 101, 93, -1000, 261, 259, 252, 399, -1000, 279, + 1994, -1000, 622, -1000, 992, -1000, 11, 10, 85, 931, + 68, 64, -1000, 2346, -1000, 396, 393, 392, 375, 374, + 373, 22, -1000, 5, 82, 251, -1000, -1000, -1000, 160, + -1000, -1000, -1000, -1000, 221, 2346, 2346, -1000, 2346, 97, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 20, -1000, + 1941, -1000, 214, -1000, -1000, -1000, 366, 365, 354, 19, + 26, 1941, 24, -1000, 2, 1, 81, 870, 62, -1000, + -1000, 50, 57, -1000, -1000, -28, 16, 13, 14, 9, + 8, -2, -1000, 213, -1000, 1236, 812, -1000, -1000, -1000, + 250, 249, -1000, 2346, -1000, 211, 60, -1000, -3, -5, + -6, -1000, 210, 164, 18, -1000, -1000, -1000, 751, 49, + 55, -1000, -1000, -1000, -1000, -1000, 202, 1781, 172, -1000, + 201, 1781, 169, -1000, 200, 1781, 163, -1000, -1000, -1000, + 346, 340, -1000, -1000, -1000, -1000, 196, -1000, 195, -1000, + 194, -1000, -1000, 228, -1000, -1000, 80, 66, -1000, 1673, + 1781, -1000, 1619, 1781, -1000, 1565, 1781, -8, -9, -1000, + -1000, -1000, 226, -1000, -1000, -1000, 193, 751, 751, -1000, + 1511, -1000, 1457, -1000, 1403, -1000, 192, 1781, 125, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1349, 1781, + -1000, 1295, -1000, +} + +var protoPgo = [...]int16{ + 0, 513, 512, 333, 415, 511, 510, 3, 509, 11, + 10, 506, 504, 503, 39, 12, 8, 30, 5, 29, + 501, 23, 0, 500, 496, 495, 494, 493, 21, 492, + 490, 459, 16, 458, 446, 444, 9, 13, 443, 441, + 28, 440, 439, 438, 432, 32, 431, 430, 429, 372, + 1, 2, 15, 428, 24, 427, 426, 34, 425, 424, + 27, 17, 423, 346, 35, 422, 421, 340, 33, 420, + 25, 419, 31, 414, 413, 14, +} + +var protoR1 = [...]int8{ + 0, 1, 1, 1, 1, 4, 4, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 2, 5, 5, + 5, 6, 28, 28, 29, 29, 30, 30, 31, 31, + 7, 14, 14, 12, 12, 16, 16, 17, 17, 17, + 20, 20, 20, 20, 20, 20, 20, 20, 70, 70, + 18, 18, 40, 40, 40, 39, 39, 39, 39, 39, + 39, 38, 38, 38, 38, 38, 38, 38, 38, 38, + 38, 38, 13, 13, 13, 13, 37, 37, 19, 19, + 19, 36, 36, 36, 36, 36, 36, 32, 32, 33, + 33, 34, 34, 35, 35, 41, 41, 41, 41, 41, + 41, 41, 41, 43, 
43, 43, 43, 43, 43, 43, + 43, 15, 9, 9, 8, 45, 45, 45, 45, 45, + 45, 44, 53, 53, 53, 52, 52, 52, 52, 52, + 42, 42, 46, 46, 47, 47, 48, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 62, + 62, 60, 60, 58, 58, 58, 61, 61, 59, 59, + 59, 21, 21, 55, 55, 56, 56, 57, 54, 54, + 63, 65, 65, 65, 64, 64, 64, 64, 64, 64, + 66, 66, 49, 51, 51, 51, 50, 50, 50, 50, + 50, 50, 50, 50, 50, 50, 50, 50, 50, 67, + 69, 69, 69, 68, 68, 68, 68, 71, 73, 73, + 73, 72, 72, 72, 72, 72, 74, 74, 75, 75, + 11, 11, 11, 10, 10, 10, 10, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, +} + +var protoR2 = [...]int8{ + 0, 1, 1, 2, 0, 2, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 2, 1, 4, 3, 4, + 4, 3, 1, 3, 1, 3, 1, 3, 1, 3, + 5, 1, 3, 1, 3, 1, 1, 1, 1, 1, + 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, + 3, 3, 1, 2, 0, 1, 2, 2, 2, 2, + 1, 3, 3, 4, 4, 5, 5, 4, 3, 2, + 5, 4, 1, 3, 5, 3, 1, 3, 1, 3, + 3, 1, 3, 3, 5, 3, 5, 1, 2, 1, + 2, 1, 2, 1, 2, 6, 6, 6, 7, 7, + 7, 5, 6, 6, 6, 6, 7, 7, 7, 5, + 6, 3, 1, 3, 3, 8, 8, 8, 9, 9, + 9, 5, 2, 1, 0, 1, 1, 1, 2, 1, + 5, 6, 7, 8, 5, 6, 6, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, + 4, 1, 3, 1, 3, 3, 1, 3, 1, 3, + 3, 1, 2, 3, 1, 3, 1, 3, 1, 3, + 5, 2, 1, 0, 1, 1, 1, 1, 2, 1, + 4, 5, 5, 2, 1, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 1, 5, + 2, 1, 0, 1, 1, 2, 1, 5, 2, 1, + 0, 1, 1, 1, 2, 1, 6, 8, 4, 3, + 2, 1, 0, 1, 1, 2, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, +} + +var protoChk = [...]int16{ + -1000, -1, -2, -4, 8, -3, -5, -6, -7, -49, + -63, -67, -71, 52, 2, 9, 12, 13, 44, 43, + 45, 46, -4, -3, 51, 52, -70, 10, 11, 4, + -28, -22, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, -14, -12, -22, 65, -22, + -22, -32, -28, 59, -22, -70, 52, -70, -70, -70, + 52, 59, 51, 59, -32, 54, 54, 54, -28, 54, + 52, 52, 52, -28, -16, -17, -18, -70, -20, -22, + 54, 6, 64, 63, 5, -14, 66, -51, -50, -41, + -63, -49, -67, -62, -45, -7, -44, -47, -55, 52, + 2, 20, 19, 18, -33, 39, 37, -48, 42, -57, + -29, 59, 38, -24, 7, 8, 9, 10, 11, 12, + 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 40, 41, 46, 47, 48, 49, -65, -64, -7, -66, + 
-56, 52, 2, -27, 42, -57, 7, 8, 9, 10, + 11, 12, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 43, 44, 45, 46, 47, 48, 49, -69, -68, -43, + -45, 2, 20, 19, 18, -34, -30, 59, -25, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, -73, + -72, -7, -74, 52, 2, 47, 52, -40, 2, -39, + -38, -13, -22, 67, 6, 16, 5, 6, 16, 5, + 55, -50, 52, -32, 36, -32, 36, -32, 36, -22, + -60, -58, 5, -22, -22, -60, -54, -70, -28, 62, + 59, 55, -64, 52, 51, -61, -59, -21, 5, 64, + 55, -68, 52, -32, -32, -32, -22, -28, 59, 55, + -72, 52, -22, 55, 55, 60, 52, -40, 2, 60, + 52, 53, 67, -19, 62, -18, -28, 2, -22, -22, + -22, -22, -22, -22, 51, 52, -15, 67, 60, 40, + 54, 51, 52, 52, 60, -23, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, -28, -21, + 52, 60, 40, 5, -22, -22, -22, 51, -28, -75, + 65, -17, 67, -19, 62, 68, -37, 2, -19, 62, + 2, -40, 68, 57, 68, 51, 51, 51, 51, 51, + 51, 5, 52, -9, -8, -14, -60, 5, 41, -53, + -52, -7, -42, -46, 2, -35, 36, -31, 59, -26, + 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 5, -54, + 60, 52, -15, -61, -21, 41, 51, 51, 51, 5, + 49, 48, -32, 68, -36, 2, -16, 62, 2, 68, + 68, 60, 2, 61, 61, -28, 5, 5, 5, 5, + 5, 5, 52, -15, 68, 60, 51, 55, -52, 52, + -22, -22, -28, 59, 52, -15, -32, 52, 5, 5, + 5, 52, -15, -75, -32, 66, 68, 68, 60, -40, + 2, 61, -37, 61, 68, 52, -15, 54, -15, 52, + -15, 54, -15, 52, -15, 54, -15, 52, -9, -16, + 51, 51, -28, 52, 61, 52, -15, 52, -15, 52, + -15, 52, 52, 54, 66, -36, 61, 61, 52, -51, + 54, 52, -51, 54, 52, -51, 54, 5, 5, 52, + 52, 52, -11, -10, -7, 52, 2, 60, 60, 55, + -51, 55, -51, 55, -51, 52, -15, 54, -15, 55, + -10, 52, -36, -36, 55, 55, 55, 52, -51, 54, + 55, -51, 55, +} + +var protoDef = [...]int16{ + -2, -2, -2, -2, 0, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 16, 0, 0, 0, 0, 0, + 0, 0, -2, 5, 0, 15, 0, 0, 0, 48, + 0, 22, 380, 381, 382, 383, 384, 385, 386, 387, + 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, + 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, + 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, + 418, 419, 420, 421, 422, 0, 31, 33, 0, 0, + 0, 0, 87, 0, 0, 0, 18, 0, 0, 49, + 21, 0, 0, 0, 0, -2, -2, -2, 88, -2, + 17, 19, 20, 23, 0, 35, 36, 37, 38, 39, + -2, 40, 0, 0, 45, 32, 34, 0, 184, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 198, 0, 0, 0, 0, 0, 0, 0, 0, 164, + 89, 0, 253, 24, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 254, 255, 256, 257, 258, 259, 0, 172, 174, 175, + 176, 177, 179, 0, 0, 166, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, + 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 0, 201, 203, + 204, 206, 0, 0, 0, 0, 91, 0, 26, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 0, + 209, 211, 212, 213, 215, 0, 30, 0, 60, -2, + 55, 0, 72, 0, 41, 44, 47, 42, 43, 46, + 182, 183, 197, 0, 409, 0, 409, 0, 409, 0, + 0, 151, 153, 0, 0, 0, 0, 168, 90, 0, + 0, 170, 171, 178, 0, 0, 156, 158, 161, 0, + 199, 200, 205, 0, 0, 0, 0, 92, 0, 207, + 208, 214, 0, 50, 51, 58, 
59, 53, 60, 56, + 57, 0, 0, 69, -2, 78, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 149, 0, 0, 0, 0, + -2, 0, 163, 167, 0, 0, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 25, 0, + 165, 0, 0, 162, 0, 0, 0, 0, 27, 0, + 0, 61, 0, 68, -2, 62, 0, 0, 76, -2, + 60, 0, 73, 0, 75, 0, 0, 0, 0, 0, + 0, 0, 150, 0, 112, 0, 152, 154, 155, 0, + 123, 125, 126, 127, 129, 0, 366, 93, 0, 28, + 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, + 361, 362, 363, 364, 365, 367, 368, 369, 370, 371, + 372, 373, 374, 375, 376, 377, 378, 379, 0, 169, + 0, 180, 0, 157, 159, 160, 0, 0, 0, 0, + 0, 421, 0, 63, 0, 0, 81, -2, 60, 64, + 67, 0, 60, 71, 79, 0, 0, 0, 0, 0, + 0, 0, 101, 0, 111, 0, 0, 121, 122, 128, + 0, 0, 94, 0, 134, 0, 0, 181, 0, 0, + 0, 109, 0, 0, 0, 219, 65, 66, 0, 0, + 60, 70, 77, 80, 74, 95, 0, -2, 0, 96, + 0, -2, 0, 97, 0, -2, 0, 102, 113, 114, + 0, 0, 29, 135, 136, 103, 0, 104, 0, 105, + 0, 110, 216, -2, 218, 82, 83, 85, 98, 0, + -2, 99, 0, -2, 100, 0, -2, 0, 0, 106, + 107, 108, 0, 221, 223, 224, 226, 0, 0, 115, + 0, 116, 0, 117, 0, 130, 0, -2, 0, 217, + 220, 225, 84, 86, 118, 119, 120, 131, 0, -2, + 132, 0, 133, +} + +var protoTok1 = [...]int8{ + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 76, 3, 74, 73, 72, 70, 3, + 65, 66, 69, 63, 60, 64, 59, 57, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 53, 52, + 62, 51, 61, 58, 75, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 67, 56, 68, 71, 3, 78, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 54, 3, 55, 77, +} + +var protoTok2 = [...]int8{ + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, +} + +var protoTok3 = [...]int8{ + 0, +} + +var protoErrorMessages = [...]struct { + state int + token int + msg string +}{} + +/* parser for yacc output */ + +var ( + protoDebug = 0 + protoErrorVerbose = false +) + +type protoLexer interface { + Lex(lval *protoSymType) int + Error(s string) +} + +type protoParser interface { + Parse(protoLexer) int + Lookahead() int +} + +type protoParserImpl struct { + lval protoSymType + stack [protoInitialStackSize]protoSymType + char int +} + +func (p *protoParserImpl) Lookahead() int { + return p.char +} + +func protoNewParser() protoParser { + return &protoParserImpl{} +} + +const protoFlag = -1000 + +func protoTokname(c int) string { + if c >= 1 && c-1 < len(protoToknames) { + if protoToknames[c-1] != "" { + return protoToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func protoStatname(s int) string { + if s >= 0 && s < len(protoStatenames) { + if protoStatenames[s] != "" { + return protoStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func protoErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !protoErrorVerbose { + return "syntax error" + } + + for _, e := range protoErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + protoTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. 
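+	// A token can be shifted here when protoAct[protoPact[state]+tok]
+	// names a state whose protoChk entry equals tok, the same test the
+	// main parse loop applies before shifting.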
+ base := int(protoPact[state]) + for tok := TOKSTART; tok-1 < len(protoToknames); tok++ { + if n := base + tok; n >= 0 && n < protoLast && int(protoChk[int(protoAct[n])]) == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if protoDef[state] == -2 { + i := 0 + for protoExca[i] != -1 || int(protoExca[i+1]) != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; protoExca[i] >= 0; i += 2 { + tok := int(protoExca[i]) + if tok < TOKSTART || protoExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if protoExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += protoTokname(tok) + } + return res +} + +func protolex1(lex protoLexer, lval *protoSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = int(protoTok1[0]) + goto out + } + if char < len(protoTok1) { + token = int(protoTok1[char]) + goto out + } + if char >= protoPrivate { + if char < protoPrivate+len(protoTok2) { + token = int(protoTok2[char-protoPrivate]) + goto out + } + } + for i := 0; i < len(protoTok3); i += 2 { + token = int(protoTok3[i+0]) + if token == char { + token = int(protoTok3[i+1]) + goto out + } + } + +out: + if token == 0 { + token = int(protoTok2[1]) /* unknown char */ + } + if protoDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", protoTokname(token), uint(char)) + } + return char, token +} + +func protoParse(protolex protoLexer) int { + return protoNewParser().Parse(protolex) +} + +func (protorcvr *protoParserImpl) Parse(protolex protoLexer) int { + var proton int + var protoVAL protoSymType + var protoDollar []protoSymType + _ = protoDollar // silence set and not used + protoS := protorcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + protostate := 0 + protorcvr.char = -1 + prototoken := -1 // protorcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ protostate = -1 + protorcvr.char = -1 + prototoken = -1 + }() + protop := -1 + goto protostack + +ret0: + return 0 + +ret1: + return 1 + +protostack: + /* put a state and value onto the stack */ + if protoDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", protoTokname(prototoken), protoStatname(protostate)) + } + + protop++ + if protop >= len(protoS) { + nyys := make([]protoSymType, len(protoS)*2) + copy(nyys, protoS) + protoS = nyys + } + protoS[protop] = protoVAL + protoS[protop].yys = protostate + +protonewstate: + proton = int(protoPact[protostate]) + if proton <= protoFlag { + goto protodefault /* simple state */ + } + if protorcvr.char < 0 { + protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval) + } + proton += prototoken + if proton < 0 || proton >= protoLast { + goto protodefault + } + proton = int(protoAct[proton]) + if int(protoChk[proton]) == prototoken { /* valid shift */ + protorcvr.char = -1 + prototoken = -1 + protoVAL = protorcvr.lval + protostate = proton + if Errflag > 0 { + Errflag-- + } + goto protostack + } + +protodefault: + /* default state action */ + proton = int(protoDef[protostate]) + if proton == -2 { + if protorcvr.char < 0 { + protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if protoExca[xi+0] == -1 && int(protoExca[xi+1]) == protostate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + proton = int(protoExca[xi+0]) + if proton < 0 || proton == prototoken { + break + } + } + proton = int(protoExca[xi+1]) + if proton < 0 { + goto ret0 + } + } + if proton == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + protolex.Error(protoErrorMessage(protostate, prototoken)) + Nerrs++ + if protoDebug >= 1 { + __yyfmt__.Printf("%s", protoStatname(protostate)) + __yyfmt__.Printf(" saw %s\n", protoTokname(prototoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for protop >= 0 { + proton = int(protoPact[protoS[protop].yys]) + protoErrCode + if proton >= 0 && proton < protoLast { + protostate = int(protoAct[proton]) /* simulate a shift of "error" */ + if int(protoChk[protostate]) == protoErrCode { + goto protostack + } + } + + /* the current p has no shift on "error", pop stack */ + if protoDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", protoS[protop].yys) + } + protop-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if protoDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", protoTokname(prototoken)) + } + if prototoken == protoEofCode { + goto ret1 + } + protorcvr.char = -1 + prototoken = -1 + goto protonewstate /* try again in the same state */ + } + } + + /* reduction by production proton */ + if protoDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", proton, protoStatname(protostate)) + } + + protont := proton + protopt := protop + _ = protopt // guard against "declared and not used" + + protop -= int(protoR2[proton]) + // protop is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if protop+1 >= len(protoS) { + nyys := make([]protoSymType, len(protoS)*2) + copy(nyys, protoS) + protoS = nyys + } + protoVAL = protoS[protop+1] + + /* consult goto table to find next state */ + proton = int(protoR1[proton]) + protog := int(protoPgo[proton]) + protoj := protog + protoS[protop].yys + 1 + + if protoj >= protoLast { + protostate = int(protoAct[protog]) + } else { + protostate = int(protoAct[protoj]) + if int(protoChk[protostate]) != -proton { + protostate = int(protoAct[protog]) + } + } + // dummy call; replaced with literal code + switch protont { + + case 1: + protoDollar = protoS[protopt-1 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, nil, lex.eof) + lex.res = protoVAL.file + } + case 2: + protoDollar = protoS[protopt-1 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, nil, protoDollar[1].fileDecls, lex.eof) + lex.res = protoVAL.file + } + case 3: + protoDollar = protoS[protopt-2 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, protoDollar[2].fileDecls, lex.eof) + lex.res = protoVAL.file + } + case 4: + protoDollar = protoS[protopt-0 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, nil, nil, lex.eof) + lex.res = protoVAL.file + } + case 5: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].fileDecl != nil { + protoVAL.fileDecls = append(protoDollar[1].fileDecls, protoDollar[2].fileDecl) + } else { + protoVAL.fileDecls = protoDollar[1].fileDecls + } + } + case 6: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].fileDecl != nil { + protoVAL.fileDecls = []ast.FileElement{protoDollar[1].fileDecl} + } else { + protoVAL.fileDecls = nil + } + } + case 7: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = protoDollar[1].imprt + } + case 8: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = protoDollar[1].pkg + } + case 9: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = protoDollar[1].opt + } + case 10: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = protoDollar[1].msg + } + case 11: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = protoDollar[1].en + } + case 12: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = protoDollar[1].extend + } + case 13: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = protoDollar[1].svc + } + case 14: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = ast.NewEmptyDeclNode(protoDollar[1].b) + } + case 15: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.fileDecl = nil + } + case 16: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileDecl = nil + } + case 17: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.syn = ast.NewSyntaxNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, protoDollar[3].str.toStringValueNode(), protoDollar[4].b) + } + case 18: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.imprt = ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, nil, protoDollar[2].str.toStringValueNode(), protoDollar[3].b) + } + case 19: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.imprt = ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, protoDollar[2].id.ToKeyword(), protoDollar[3].str.toStringValueNode(), protoDollar[4].b) + } + case 20: + protoDollar = 
protoS[protopt-4 : protopt+1] + { + protoVAL.imprt = ast.NewImportNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), nil, protoDollar[3].str.toStringValueNode(), protoDollar[4].b) + } + case 21: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.pkg = ast.NewPackageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b) + } + case 22: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} + } + case 23: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} + } + case 24: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} + } + case 25: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} + } + case 26: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} + } + case 27: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} + } + case 28: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} + } + case 29: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} + } + case 30: + protoDollar = protoS[protopt-5 : protopt+1] + { + refs, dots := protoDollar[2].optNms.toNodes() + optName := ast.NewOptionNameNode(refs, dots) + protoVAL.opt = ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, protoDollar[5].b) + } + case 31: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.optNms = &fieldRefList{protoDollar[1].ref, nil, nil} + } + case 32: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.optNms = &fieldRefList{protoDollar[1].ref, protoDollar[2].b, protoDollar[3].optNms} + } + case 33: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ref = ast.NewFieldReferenceNode(protoDollar[1].id) + } + case 34: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.ref = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].tid, protoDollar[3].b) + } + case 37: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].str.toStringValueNode() + } + case 39: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].id + } + case 40: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].f + } + case 41: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) + } + case 42: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) + } + case 43: + protoDollar = protoS[protopt-2 : protopt+1] + { + f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + } + case 44: + protoDollar = protoS[protopt-2 : protopt+1] + { + f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + } + case 45: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].i + } + case 46: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = 
ast.NewPositiveUintLiteralNode(protoDollar[1].b, protoDollar[2].i) + } + case 47: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].i.Val > math.MaxInt64+1 { + // can't represent as int so treat as float literal + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].i) + } else { + protoVAL.v = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i) + } + } + case 48: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.str = &stringList{protoDollar[1].s, nil} + } + case 49: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.str = &stringList{protoDollar[1].s, protoDollar[2].str} + } + case 50: + protoDollar = protoS[protopt-3 : protopt+1] + { + fields, delims := protoDollar[2].msgLit.toNodes() + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) + } + case 51: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.v = nil + } + case 52: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].msgEntry != nil { + protoVAL.msgLit = &messageFieldList{protoDollar[1].msgEntry, nil} + } else { + protoVAL.msgLit = nil + } + } + case 53: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].msgEntry != nil { + protoVAL.msgLit = &messageFieldList{protoDollar[1].msgEntry, protoDollar[2].msgLit} + } else { + protoVAL.msgLit = protoDollar[2].msgLit + } + } + case 54: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.msgLit = nil + } + case 55: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].msgField != nil { + protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, nil} + } else { + protoVAL.msgEntry = nil + } + } + case 56: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].msgField != nil { + protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, protoDollar[2].b} + } else { + protoVAL.msgEntry = nil + } + } + case 57: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].msgField != nil { + protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, protoDollar[2].b} + } else { + protoVAL.msgEntry = nil + } + } + case 58: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.msgEntry = nil + } + case 59: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.msgEntry = nil + } + case 60: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgEntry = nil + } + case 61: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[1].ref != nil { + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, protoDollar[3].v) + } else { + protoVAL.msgField = nil + } + } + case 62: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[1].ref != nil { + val := ast.NewArrayLiteralNode(protoDollar[2].b, nil, nil, protoDollar[3].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, val) + } else { + protoVAL.msgField = nil + } + } + case 63: + protoDollar = protoS[protopt-4 : protopt+1] + { + if protoDollar[1].ref != nil { + val := ast.NewArrayLiteralNode(protoDollar[3].b, nil, nil, protoDollar[4].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, val) + } else { + protoVAL.msgField = nil + } + } + case 64: + protoDollar = protoS[protopt-4 : protopt+1] + { + if protoDollar[1].ref != nil { + vals, commas := protoDollar[3].sl.toNodes() + val := ast.NewArrayLiteralNode(protoDollar[2].b, vals, commas, protoDollar[4].b) + protoVAL.msgField = 
ast.NewMessageFieldNode(protoDollar[1].ref, nil, val) + } else { + protoVAL.msgField = nil + } + } + case 65: + protoDollar = protoS[protopt-5 : protopt+1] + { + if protoDollar[1].ref != nil { + vals, commas := protoDollar[4].sl.toNodes() + val := ast.NewArrayLiteralNode(protoDollar[3].b, vals, commas, protoDollar[5].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, val) + } else { + protoVAL.msgField = nil + } + } + case 66: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.msgField = nil + } + case 67: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.msgField = nil + } + case 68: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[1].ref != nil && protoDollar[3].v != nil { + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, protoDollar[3].v) + } else { + protoVAL.msgField = nil + } + } + case 69: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].ref != nil && protoDollar[2].v != nil { + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, protoDollar[2].v) + } else { + protoVAL.msgField = nil + } + } + case 70: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.msgField = nil + } + case 71: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.msgField = nil + } + case 72: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ref = ast.NewFieldReferenceNode(protoDollar[1].id) + } + case 73: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.ref = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b) + } + case 74: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.ref = ast.NewAnyTypeReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b, protoDollar[4].cid.toIdentValueNode(nil), protoDollar[5].b) + } + case 75: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.ref = nil + } + case 76: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].v == nil { + protoVAL.sl = nil + } else { + protoVAL.sl = &valueList{protoDollar[1].v, nil, nil} + } + } + case 77: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[1].v == nil { + protoVAL.sl = nil + } else { + protoVAL.sl = &valueList{protoDollar[1].v, protoDollar[2].b, protoDollar[3].sl} + } + } + case 78: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].v + } + case 79: + protoDollar = protoS[protopt-3 : protopt+1] + { + fields, delims := protoDollar[2].msgLit.toNodes() + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) + } + case 80: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.v = nil + } + case 81: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.sl = &valueList{protoDollar[1].v, nil, nil} + } + case 82: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.sl = &valueList{protoDollar[1].v, protoDollar[2].b, protoDollar[3].sl} + } + case 83: + protoDollar = protoS[protopt-3 : protopt+1] + { + fields, delims := protoDollar[2].msgLit.toNodes() + msg := ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) + protoVAL.sl = &valueList{msg, nil, nil} + } + case 84: + protoDollar = protoS[protopt-5 : protopt+1] + { + fields, delims := protoDollar[2].msgLit.toNodes() + msg := ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) + protoVAL.sl = &valueList{msg, protoDollar[4].b, 
protoDollar[5].sl} + } + case 85: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.sl = nil + } + case 86: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.sl = protoDollar[5].sl + } + case 87: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 88: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 89: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 90: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 91: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 92: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 93: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 94: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 95: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) + } + case 96: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) + } + case 97: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) + } + case 98: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + } + case 99: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + } + case 100: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + } + case 101: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) + } + case 102: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) + } + case 103: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) + } + case 104: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, 
protoDollar[6].b) + } + case 105: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) + } + case 106: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + } + case 107: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + } + case 108: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + } + case 109: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) + } + case 110: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) + } + case 111: + protoDollar = protoS[protopt-3 : protopt+1] + { + opts, commas := protoDollar[2].opts.toNodes() + protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, opts, commas, protoDollar[3].b) + } + case 112: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.opts = &compactOptionList{protoDollar[1].opt, nil, nil} + } + case 113: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.opts = &compactOptionList{protoDollar[1].opt, protoDollar[2].b, protoDollar[3].opts} + } + case 114: + protoDollar = protoS[protopt-3 : protopt+1] + { + refs, dots := protoDollar[1].optNms.toNodes() + optName := ast.NewOptionNameNode(refs, dots) + protoVAL.opt = ast.NewCompactOptionNode(optName, protoDollar[2].b, protoDollar[3].v) + } + case 115: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) + } + case 116: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) + } + case 117: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) + } + case 118: + protoDollar = protoS[protopt-9 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) + } + case 119: + protoDollar = protoS[protopt-9 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, 
protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) + } + case 120: + protoDollar = protoS[protopt-9 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) + } + case 121: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.oo = ast.NewOneOfNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].ooDecls, protoDollar[5].b) + } + case 122: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].ooDecl != nil { + protoVAL.ooDecls = append(protoDollar[1].ooDecls, protoDollar[2].ooDecl) + } else { + protoVAL.ooDecls = protoDollar[1].ooDecls + } + } + case 123: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].ooDecl != nil { + protoVAL.ooDecls = []ast.OneOfElement{protoDollar[1].ooDecl} + } else { + protoVAL.ooDecls = nil + } + } + case 124: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.ooDecls = nil + } + case 125: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooDecl = protoDollar[1].opt + } + case 126: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooDecl = protoDollar[1].fld + } + case 127: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooDecl = protoDollar[1].grp + } + case 128: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.ooDecl = nil + } + case 129: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooDecl = nil + } + case 130: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) + } + case 131: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) + } + case 132: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b, protoDollar[6].msgDecls, protoDollar[7].b) + } + case 133: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) + } + case 134: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.mapFld = ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) + } + case 135: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.mapFld = ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) + } + case 136: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.mapType = ast.NewMapTypeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, protoDollar[3].id, protoDollar[4].b, protoDollar[5].tid, protoDollar[6].b) + } + case 149: + protoDollar = protoS[protopt-3 : protopt+1] + { + ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.ext = ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), ranges, commas, nil, protoDollar[3].b) + } + case 150: + protoDollar = protoS[protopt-4 : protopt+1] + { + 
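+ // extension ranges with trailing compact options: the parsed range list is unpacked into nodes and comma separators below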
ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.ext = ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].cmpctOpts, protoDollar[4].b) + } + case 151: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.rngs = &rangeList{protoDollar[1].rng, nil, nil} + } + case 152: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.rngs = &rangeList{protoDollar[1].rng, protoDollar[2].b, protoDollar[3].rngs} + } + case 153: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, nil, nil, nil) + } + case 154: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), protoDollar[3].i, nil) + } + case 155: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword()) + } + case 156: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.rngs = &rangeList{protoDollar[1].rng, nil, nil} + } + case 157: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.rngs = &rangeList{protoDollar[1].rng, protoDollar[2].b, protoDollar[3].rngs} + } + case 158: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, nil, nil, nil) + } + case 159: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), protoDollar[3].il, nil) + } + case 160: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword()) + } + case 161: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.il = protoDollar[1].i + } + case 162: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.il = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i) + } + case 163: + protoDollar = protoS[protopt-3 : protopt+1] + { + ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.resvd = ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].b) + } + case 165: + protoDollar = protoS[protopt-3 : protopt+1] + { + ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.resvd = ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].b) + } + case 167: + protoDollar = protoS[protopt-3 : protopt+1] + { + names, commas := protoDollar[2].names.toNodes() + protoVAL.resvd = ast.NewReservedNamesNode(protoDollar[1].id.ToKeyword(), names, commas, protoDollar[3].b) + } + case 168: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.names = &nameList{protoDollar[1].str.toStringValueNode(), nil, nil} + } + case 169: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.names = &nameList{protoDollar[1].str.toStringValueNode(), protoDollar[2].b, protoDollar[3].names} + } + case 170: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.en = ast.NewEnumNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].enDecls, protoDollar[5].b) + } + case 171: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].enDecl != nil { + protoVAL.enDecls = append(protoDollar[1].enDecls, protoDollar[2].enDecl) + } else { + protoVAL.enDecls = protoDollar[1].enDecls + } + } + case 172: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].enDecl != nil { + protoVAL.enDecls = 
[]ast.EnumElement{protoDollar[1].enDecl} + } else { + protoVAL.enDecls = nil + } + } + case 173: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.enDecls = nil + } + case 174: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enDecl = protoDollar[1].opt + } + case 175: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enDecl = protoDollar[1].env + } + case 176: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enDecl = protoDollar[1].resvd + } + case 177: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enDecl = ast.NewEmptyDeclNode(protoDollar[1].b) + } + case 178: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.enDecl = nil + } + case 179: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enDecl = nil + } + case 180: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.env = ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, nil, protoDollar[4].b) + } + case 181: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.env = ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, protoDollar[4].cmpctOpts, protoDollar[5].b) + } + case 182: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.msg = ast.NewMessageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].msgDecls, protoDollar[5].b) + } + case 183: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].msgDecl != nil { + protoVAL.msgDecls = append(protoDollar[1].msgDecls, protoDollar[2].msgDecl) + } else { + protoVAL.msgDecls = protoDollar[1].msgDecls + } + } + case 184: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].msgDecl != nil { + protoVAL.msgDecls = []ast.MessageElement{protoDollar[1].msgDecl} + } else { + protoVAL.msgDecls = nil + } + } + case 185: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.msgDecls = nil + } + case 186: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].fld + } + case 187: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].en + } + case 188: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].msg + } + case 189: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].extend + } + case 190: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].ext + } + case 191: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].grp + } + case 192: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].opt + } + case 193: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].oo + } + case 194: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].mapFld + } + case 195: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = protoDollar[1].resvd + } + case 196: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = ast.NewEmptyDeclNode(protoDollar[1].b) + } + case 197: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.msgDecl = nil + } + case 198: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgDecl = nil + } + case 199: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.extend = ast.NewExtendNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].b, protoDollar[4].extDecls, protoDollar[5].b) + } + case 200: + 
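+ // accumulate extend-block declarations; a nil element (produced by error recovery) is skipped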
protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].extDecl != nil { + protoVAL.extDecls = append(protoDollar[1].extDecls, protoDollar[2].extDecl) + } else { + protoVAL.extDecls = protoDollar[1].extDecls + } + } + case 201: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].extDecl != nil { + protoVAL.extDecls = []ast.ExtendElement{protoDollar[1].extDecl} + } else { + protoVAL.extDecls = nil + } + } + case 202: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.extDecls = nil + } + case 203: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.extDecl = protoDollar[1].fld + } + case 204: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.extDecl = protoDollar[1].grp + } + case 205: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.extDecl = nil + } + case 206: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.extDecl = nil + } + case 207: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.svc = ast.NewServiceNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].svcDecls, protoDollar[5].b) + } + case 208: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].svcDecl != nil { + protoVAL.svcDecls = append(protoDollar[1].svcDecls, protoDollar[2].svcDecl) + } else { + protoVAL.svcDecls = protoDollar[1].svcDecls + } + } + case 209: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].svcDecl != nil { + protoVAL.svcDecls = []ast.ServiceElement{protoDollar[1].svcDecl} + } else { + protoVAL.svcDecls = nil + } + } + case 210: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.svcDecls = nil + } + case 211: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcDecl = protoDollar[1].opt + } + case 212: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcDecl = protoDollar[1].mtd + } + case 213: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcDecl = ast.NewEmptyDeclNode(protoDollar[1].b) + } + case 214: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.svcDecl = nil + } + case 215: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcDecl = nil + } + case 216: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.mtd = ast.NewRPCNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].rpcType, protoDollar[4].id.ToKeyword(), protoDollar[5].rpcType, protoDollar[6].b) + } + case 217: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.mtd = ast.NewRPCNodeWithBody(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].rpcType, protoDollar[4].id.ToKeyword(), protoDollar[5].rpcType, protoDollar[6].b, protoDollar[7].rpcDecls, protoDollar[8].b) + } + case 218: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.rpcType = ast.NewRPCTypeNode(protoDollar[1].b, protoDollar[2].id.ToKeyword(), protoDollar[3].tid, protoDollar[4].b) + } + case 219: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.rpcType = ast.NewRPCTypeNode(protoDollar[1].b, nil, protoDollar[2].tid, protoDollar[3].b) + } + case 220: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].rpcDecl != nil { + protoVAL.rpcDecls = append(protoDollar[1].rpcDecls, protoDollar[2].rpcDecl) + } else { + protoVAL.rpcDecls = protoDollar[1].rpcDecls + } + } + case 221: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].rpcDecl != nil { + protoVAL.rpcDecls = []ast.RPCElement{protoDollar[1].rpcDecl} + } else { + protoVAL.rpcDecls = nil + } + } + 
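+ // remaining rpc-body productions: options and empty statements are kept, while error-recovery productions yield nil declarations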
case 222: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.rpcDecls = nil + } + case 223: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.rpcDecl = protoDollar[1].opt + } + case 224: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.rpcDecl = ast.NewEmptyDeclNode(protoDollar[1].b) + } + case 225: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.rpcDecl = nil + } + case 226: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.rpcDecl = nil + } + } + goto protostack /* stack new state and value */ +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/result.go b/vendor/github.com/bufbuild/protocompile/parser/result.go new file mode 100644 index 00000000..89afa2a1 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/result.go @@ -0,0 +1,928 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "bytes" + "math" + "strings" + "unicode" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/reporter" +) + +type result struct { + file *ast.FileNode + proto *descriptorpb.FileDescriptorProto + + nodes map[proto.Message]ast.Node +} + +// ResultWithoutAST returns a parse result that has no AST. All methods for +// looking up AST nodes return a placeholder node that contains only the filename +// in position information. +func ResultWithoutAST(proto *descriptorpb.FileDescriptorProto) Result { + return &result{proto: proto} +} + +// ResultFromAST constructs a descriptor proto from the given AST. The returned +// result includes the descriptor proto and also contains an index that can be +// used to look up AST node information for elements in the descriptor proto +// hierarchy. +// +// If validate is true, some basic validation is performed to make sure the +// resulting descriptor proto is valid per protobuf rules and semantics. Only +// some language elements can be validated since some rules and semantics can +// only be checked after all symbols are resolved, which happens in the +// linking step. +// +// The given handler is used to report any errors or warnings encountered. If any +// errors are reported, this function returns a non-nil error. +func ResultFromAST(file *ast.FileNode, validate bool, handler *reporter.Handler) (Result, error) { + filename := file.Name() + r := &result{file: file, nodes: map[proto.Message]ast.Node{}} + r.createFileDescriptor(filename, file, handler) + if validate { + validateBasic(r, handler) + } + // Now that we're done validating, we can set any missing labels to optional + // (we leave them absent in first pass if label was missing in source, so we + // can do validation on presence of label, but final descriptors are expected + // to always have them present).
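+ // (each label still absent at this point is filled in as optional)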
+ fillInMissingLabels(r.proto) + return r, handler.Error() +} + +func (r *result) AST() *ast.FileNode { + return r.file +} + +func (r *result) FileDescriptorProto() *descriptorpb.FileDescriptorProto { + return r.proto +} + +func (r *result) createFileDescriptor(filename string, file *ast.FileNode, handler *reporter.Handler) { + fd := &descriptorpb.FileDescriptorProto{Name: proto.String(filename)} + r.proto = fd + + r.putFileNode(fd, file) + + isProto3 := false + if file.Syntax != nil { + if file.Syntax.Syntax.AsString() == "proto3" { + isProto3 = true + } else if file.Syntax.Syntax.AsString() != "proto2" { + nodeInfo := file.NodeInfo(file.Syntax.Syntax) + if handler.HandleErrorf(nodeInfo.Start(), `syntax value must be "proto2" or "proto3"`) != nil { + return + } + } + + // proto2 is the default, so no need to set unless proto3 + if isProto3 { + fd.Syntax = proto.String(file.Syntax.Syntax.AsString()) + } + } else { + nodeInfo := file.NodeInfo(file) + handler.HandleWarningWithPos(nodeInfo.Start(), ErrNoSyntax) + } + + for _, decl := range file.Decls { + if handler.ReporterError() != nil { + return + } + switch decl := decl.(type) { + case *ast.EnumNode: + fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl, handler)) + case *ast.ExtendNode: + r.addExtensions(decl, &fd.Extension, &fd.MessageType, isProto3, handler, 0) + case *ast.ImportNode: + index := len(fd.Dependency) + fd.Dependency = append(fd.Dependency, decl.Name.AsString()) + if decl.Public != nil { + fd.PublicDependency = append(fd.PublicDependency, int32(index)) + } else if decl.Weak != nil { + fd.WeakDependency = append(fd.WeakDependency, int32(index)) + } + case *ast.MessageNode: + fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl, isProto3, handler, 1)) + case *ast.OptionNode: + if fd.Options == nil { + fd.Options = &descriptorpb.FileOptions{} + } + fd.Options.UninterpretedOption = append(fd.Options.UninterpretedOption, r.asUninterpretedOption(decl)) + case *ast.ServiceNode: + fd.Service = append(fd.Service, r.asServiceDescriptor(decl)) + case *ast.PackageNode: + if fd.Package != nil { + nodeInfo := file.NodeInfo(decl) + if handler.HandleErrorf(nodeInfo.Start(), "files should have only one package declaration") != nil { + return + } + } + pkgName := string(decl.Name.AsIdentifier()) + if len(pkgName) >= 512 { + nodeInfo := file.NodeInfo(decl.Name) + if handler.HandleErrorf(nodeInfo.Start(), "package name (with whitespace removed) must be less than 512 characters long") != nil { + return + } + } + if strings.Count(pkgName, ".") > 100 { + nodeInfo := file.NodeInfo(decl.Name) + if handler.HandleErrorf(nodeInfo.Start(), "package name may not contain more than 100 periods") != nil { + return + } + } + fd.Package = proto.String(string(decl.Name.AsIdentifier())) + } + } +} + +func (r *result) asUninterpretedOptions(nodes []*ast.OptionNode) []*descriptorpb.UninterpretedOption { + if len(nodes) == 0 { + return nil + } + opts := make([]*descriptorpb.UninterpretedOption, len(nodes)) + for i, n := range nodes { + opts[i] = r.asUninterpretedOption(n) + } + return opts +} + +func (r *result) asUninterpretedOption(node *ast.OptionNode) *descriptorpb.UninterpretedOption { + opt := &descriptorpb.UninterpretedOption{Name: r.asUninterpretedOptionName(node.Name.Parts)} + r.putOptionNode(opt, node) + + switch val := node.Val.Value().(type) { + case bool: + if val { + opt.IdentifierValue = proto.String("true") + } else { + opt.IdentifierValue = proto.String("false") + } + case int64: + opt.NegativeIntValue = proto.Int64(val) + 
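+ // non-negative integer literals arrive as uint64 and populate PositiveIntValue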
case uint64: + opt.PositiveIntValue = proto.Uint64(val) + case float64: + opt.DoubleValue = proto.Float64(val) + case string: + opt.StringValue = []byte(val) + case ast.Identifier: + opt.IdentifierValue = proto.String(string(val)) + default: + // the grammar does not allow arrays here, so the only possible case + // left should be []*ast.MessageFieldNode, which corresponds to an + // *ast.MessageLiteralNode + if n, ok := node.Val.(*ast.MessageLiteralNode); ok { + var buf bytes.Buffer + for i, el := range n.Elements { + flattenNode(r.file, el, &buf) + if len(n.Seps) > i && n.Seps[i] != nil { + buf.WriteRune(' ') + buf.WriteRune(n.Seps[i].Rune) + } + } + aggStr := buf.String() + opt.AggregateValue = proto.String(aggStr) + } + // TODO: else that reports an error or panics?? + } + return opt +} + +func flattenNode(f *ast.FileNode, n ast.Node, buf *bytes.Buffer) { + if cn, ok := n.(ast.CompositeNode); ok { + for _, ch := range cn.Children() { + flattenNode(f, ch, buf) + } + return + } + + if buf.Len() > 0 { + buf.WriteRune(' ') + } + buf.WriteString(f.NodeInfo(n).RawText()) +} + +func (r *result) asUninterpretedOptionName(parts []*ast.FieldReferenceNode) []*descriptorpb.UninterpretedOption_NamePart { + ret := make([]*descriptorpb.UninterpretedOption_NamePart, len(parts)) + for i, part := range parts { + np := &descriptorpb.UninterpretedOption_NamePart{ + NamePart: proto.String(string(part.Name.AsIdentifier())), + IsExtension: proto.Bool(part.IsExtension()), + } + r.putOptionNamePartNode(np, part) + ret[i] = np + } + return ret +} + +func (r *result) addExtensions(ext *ast.ExtendNode, flds *[]*descriptorpb.FieldDescriptorProto, msgs *[]*descriptorpb.DescriptorProto, isProto3 bool, handler *reporter.Handler, depth int) { + extendee := string(ext.Extendee.AsIdentifier()) + count := 0 + for _, decl := range ext.Decls { + switch decl := decl.(type) { + case *ast.FieldNode: + count++ + // use higher limit since we don't know yet whether extendee is messageset wire format + fd := r.asFieldDescriptor(decl, internal.MaxTag, isProto3, handler) + fd.Extendee = proto.String(extendee) + *flds = append(*flds, fd) + case *ast.GroupNode: + count++ + // ditto: use higher limit right now + fd, md := r.asGroupDescriptors(decl, isProto3, internal.MaxTag, handler, depth+1) + fd.Extendee = proto.String(extendee) + *flds = append(*flds, fd) + *msgs = append(*msgs, md) + } + } + if count == 0 { + nodeInfo := r.file.NodeInfo(ext) + _ = handler.HandleErrorf(nodeInfo.Start(), "extend sections must define at least one extension") + } +} + +func asLabel(lbl *ast.FieldLabel) *descriptorpb.FieldDescriptorProto_Label { + if !lbl.IsPresent() { + return nil + } + switch { + case lbl.Repeated: + return descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum() + case lbl.Required: + return descriptorpb.FieldDescriptorProto_LABEL_REQUIRED.Enum() + default: + return descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } +} + +func (r *result) asFieldDescriptor(node *ast.FieldNode, maxTag int32, isProto3 bool, handler *reporter.Handler) *descriptorpb.FieldDescriptorProto { + tag := node.Tag.Val + if err := r.checkTag(node.Tag, tag, maxTag); err != nil { + _ = handler.HandleError(err) + } + fd := newFieldDescriptor(node.Name.Val, string(node.FldType.AsIdentifier()), int32(tag), asLabel(&node.Label)) + r.putFieldNode(fd, node) + if opts := node.Options.GetElements(); len(opts) > 0 { + fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + if isProto3 && fd.Label != nil && fd.GetLabel() == 
descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { + fd.Proto3Optional = proto.Bool(true) + } + return fd +} + +var fieldTypes = map[string]descriptorpb.FieldDescriptorProto_Type{ + "double": descriptorpb.FieldDescriptorProto_TYPE_DOUBLE, + "float": descriptorpb.FieldDescriptorProto_TYPE_FLOAT, + "int32": descriptorpb.FieldDescriptorProto_TYPE_INT32, + "int64": descriptorpb.FieldDescriptorProto_TYPE_INT64, + "uint32": descriptorpb.FieldDescriptorProto_TYPE_UINT32, + "uint64": descriptorpb.FieldDescriptorProto_TYPE_UINT64, + "sint32": descriptorpb.FieldDescriptorProto_TYPE_SINT32, + "sint64": descriptorpb.FieldDescriptorProto_TYPE_SINT64, + "fixed32": descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + "fixed64": descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + "sfixed32": descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + "sfixed64": descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + "bool": descriptorpb.FieldDescriptorProto_TYPE_BOOL, + "string": descriptorpb.FieldDescriptorProto_TYPE_STRING, + "bytes": descriptorpb.FieldDescriptorProto_TYPE_BYTES, +} + +func newFieldDescriptor(name string, fieldType string, tag int32, lbl *descriptorpb.FieldDescriptorProto_Label) *descriptorpb.FieldDescriptorProto { + fd := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(name), + JsonName: proto.String(internal.JSONName(name)), + Number: proto.Int32(tag), + Label: lbl, + } + t, ok := fieldTypes[fieldType] + if ok { + fd.Type = t.Enum() + } else { + // NB: we don't have enough info to determine whether this is an enum + // or a message type, so we'll leave Type nil and set it later + // (during linking) + fd.TypeName = proto.String(fieldType) + } + return fd +} + +func (r *result) asGroupDescriptors(group *ast.GroupNode, isProto3 bool, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { + tag := group.Tag.Val + if err := r.checkTag(group.Tag, tag, maxTag); err != nil { + _ = handler.HandleError(err) + } + if !unicode.IsUpper(rune(group.Name.Val[0])) { + nameNodeInfo := r.file.NodeInfo(group.Name) + _ = handler.HandleErrorf(nameNodeInfo.Start(), "group %s should have a name that starts with a capital letter", group.Name.Val) + } + fieldName := strings.ToLower(group.Name.Val) + fd := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(fieldName), + JsonName: proto.String(internal.JSONName(fieldName)), + Number: proto.Int32(int32(tag)), + Label: asLabel(&group.Label), + Type: descriptorpb.FieldDescriptorProto_TYPE_GROUP.Enum(), + TypeName: proto.String(group.Name.Val), + } + r.putFieldNode(fd, group) + if opts := group.Options.GetElements(); len(opts) > 0 { + fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + md := &descriptorpb.DescriptorProto{Name: proto.String(group.Name.Val)} + r.putMessageNode(md, group) + // don't bother processing body if we've exceeded depth + if r.checkDepth(depth, group, handler) { + r.addMessageBody(md, &group.MessageBody, isProto3, handler, depth) + } + return fd, md +} + +func (r *result) asMapDescriptors(mapField *ast.MapFieldNode, isProto3 bool, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { + tag := mapField.Tag.Val + if err := r.checkTag(mapField.Tag, tag, maxTag); err != nil { + _ = handler.HandleError(err) + } + r.checkDepth(depth, mapField, handler) + var lbl *descriptorpb.FieldDescriptorProto_Label + if !isProto3 { + lbl = 
descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + keyFd := newFieldDescriptor("key", mapField.MapType.KeyType.Val, 1, lbl) + r.putFieldNode(keyFd, mapField.KeyField()) + valFd := newFieldDescriptor("value", string(mapField.MapType.ValueType.AsIdentifier()), 2, lbl) + r.putFieldNode(valFd, mapField.ValueField()) + entryName := internal.InitCap(internal.JSONName(mapField.Name.Val)) + "Entry" + fd := newFieldDescriptor(mapField.Name.Val, entryName, int32(tag), descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum()) + if opts := mapField.Options.GetElements(); len(opts) > 0 { + fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + r.putFieldNode(fd, mapField) + md := &descriptorpb.DescriptorProto{ + Name: proto.String(entryName), + Options: &descriptorpb.MessageOptions{MapEntry: proto.Bool(true)}, + Field: []*descriptorpb.FieldDescriptorProto{keyFd, valFd}, + } + r.putMessageNode(md, mapField) + return fd, md +} + +func (r *result) asExtensionRanges(node *ast.ExtensionRangeNode, maxTag int32, handler *reporter.Handler) []*descriptorpb.DescriptorProto_ExtensionRange { + opts := r.asUninterpretedOptions(node.Options.GetElements()) + ers := make([]*descriptorpb.DescriptorProto_ExtensionRange, len(node.Ranges)) + for i, rng := range node.Ranges { + start, end := r.getRangeBounds(rng, 1, maxTag, handler) + er := &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(start), + End: proto.Int32(end + 1), + } + if len(opts) > 0 { + er.Options = &descriptorpb.ExtensionRangeOptions{UninterpretedOption: opts} + } + r.putExtensionRangeNode(er, rng) + ers[i] = er + } + return ers +} + +func (r *result) asEnumValue(ev *ast.EnumValueNode, handler *reporter.Handler) *descriptorpb.EnumValueDescriptorProto { + num, ok := ast.AsInt32(ev.Number, math.MinInt32, math.MaxInt32) + if !ok { + numberNodeInfo := r.file.NodeInfo(ev.Number) + _ = handler.HandleErrorf(numberNodeInfo.Start(), "value %d is out of range: should be between %d and %d", ev.Number.Value(), math.MinInt32, math.MaxInt32) + } + evd := &descriptorpb.EnumValueDescriptorProto{Name: proto.String(ev.Name.Val), Number: proto.Int32(num)} + r.putEnumValueNode(evd, ev) + if opts := ev.Options.GetElements(); len(opts) > 0 { + evd.Options = &descriptorpb.EnumValueOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + return evd +} + +func (r *result) asMethodDescriptor(node *ast.RPCNode) *descriptorpb.MethodDescriptorProto { + md := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(node.Name.Val), + InputType: proto.String(string(node.Input.MessageType.AsIdentifier())), + OutputType: proto.String(string(node.Output.MessageType.AsIdentifier())), + } + r.putMethodNode(md, node) + if node.Input.Stream != nil { + md.ClientStreaming = proto.Bool(true) + } + if node.Output.Stream != nil { + md.ServerStreaming = proto.Bool(true) + } + // protoc always adds a MethodOptions if there are brackets + // We do the same to match protoc as closely as possible + // https://github.com/protocolbuffers/protobuf/blob/0c3f43a6190b77f1f68b7425d1b7e1a8257a8d0c/src/google/protobuf/compiler/parser.cc#L2152 + if node.OpenBrace != nil { + md.Options = &descriptorpb.MethodOptions{} + for _, decl := range node.Decls { + if option, ok := decl.(*ast.OptionNode); ok { + md.Options.UninterpretedOption = append(md.Options.UninterpretedOption, r.asUninterpretedOption(option)) + } + } + } + return md +} + +func (r *result) asEnumDescriptor(en *ast.EnumNode, handler *reporter.Handler) 
*descriptorpb.EnumDescriptorProto { + ed := &descriptorpb.EnumDescriptorProto{Name: proto.String(en.Name.Val)} + r.putEnumNode(ed, en) + for _, decl := range en.Decls { + switch decl := decl.(type) { + case *ast.OptionNode: + if ed.Options == nil { + ed.Options = &descriptorpb.EnumOptions{} + } + ed.Options.UninterpretedOption = append(ed.Options.UninterpretedOption, r.asUninterpretedOption(decl)) + case *ast.EnumValueNode: + ed.Value = append(ed.Value, r.asEnumValue(decl, handler)) + case *ast.ReservedNode: + for _, n := range decl.Names { + ed.ReservedName = append(ed.ReservedName, n.AsString()) + } + for _, rng := range decl.Ranges { + ed.ReservedRange = append(ed.ReservedRange, r.asEnumReservedRange(rng, handler)) + } + } + } + return ed +} + +func (r *result) asEnumReservedRange(rng *ast.RangeNode, handler *reporter.Handler) *descriptorpb.EnumDescriptorProto_EnumReservedRange { + start, end := r.getRangeBounds(rng, math.MinInt32, math.MaxInt32, handler) + rr := &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(start), + End: proto.Int32(end), + } + r.putEnumReservedRangeNode(rr, rng) + return rr +} + +func (r *result) asMessageDescriptor(node *ast.MessageNode, isProto3 bool, handler *reporter.Handler, depth int) *descriptorpb.DescriptorProto { + msgd := &descriptorpb.DescriptorProto{Name: proto.String(node.Name.Val)} + r.putMessageNode(msgd, node) + // don't bother processing body if we've exceeded depth + if r.checkDepth(depth, node, handler) { + r.addMessageBody(msgd, &node.MessageBody, isProto3, handler, depth) + } + return msgd +} + +func (r *result) checkDepth(depth int, node ast.MessageDeclNode, handler *reporter.Handler) bool { + if depth < 32 { + return true + } + n := ast.Node(node) + if grp, ok := n.(*ast.GroupNode); ok { + // pinpoint the group keyword if the source is a group + n = grp.Keyword + } + _ = handler.HandleErrorf(r.file.NodeInfo(n).Start(), "message nesting depth must be less than 32") + return false +} + +func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.MessageBody, isProto3 bool, handler *reporter.Handler, depth int) { + // first process any options + for _, decl := range body.Decls { + if opt, ok := decl.(*ast.OptionNode); ok { + if msgd.Options == nil { + msgd.Options = &descriptorpb.MessageOptions{} + } + msgd.Options.UninterpretedOption = append(msgd.Options.UninterpretedOption, r.asUninterpretedOption(opt)) + } + } + + // now that we have options, we can see if this uses messageset wire format, which + // impacts how we validate tag numbers in any fields in the message + maxTag := int32(internal.MaxNormalTag) + messageSetOpt, err := r.isMessageSetWireFormat("message "+msgd.GetName(), msgd, handler) + if err != nil { + return + } else if messageSetOpt != nil { + if isProto3 { + node := r.OptionNode(messageSetOpt) + nodeInfo := r.file.NodeInfo(node) + _ = handler.HandleErrorf(nodeInfo.Start(), "messages with message-set wire format are not allowed with proto3 syntax") + } + maxTag = internal.MaxTag // higher limit for messageset wire format + } + + rsvdNames := map[string]int{} + + // now we can process the rest + for _, decl := range body.Decls { + switch decl := decl.(type) { + case *ast.EnumNode: + msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl, handler)) + case *ast.ExtendNode: + r.addExtensions(decl, &msgd.Extension, &msgd.NestedType, isProto3, handler, depth) + case *ast.ExtensionRangeNode: + msgd.ExtensionRange = append(msgd.ExtensionRange, r.asExtensionRanges(decl, maxTag, 
handler)...) + case *ast.FieldNode: + fd := r.asFieldDescriptor(decl, maxTag, isProto3, handler) + msgd.Field = append(msgd.Field, fd) + case *ast.MapFieldNode: + fd, md := r.asMapDescriptors(decl, isProto3, maxTag, handler, depth+1) + msgd.Field = append(msgd.Field, fd) + msgd.NestedType = append(msgd.NestedType, md) + case *ast.GroupNode: + fd, md := r.asGroupDescriptors(decl, isProto3, maxTag, handler, depth+1) + msgd.Field = append(msgd.Field, fd) + msgd.NestedType = append(msgd.NestedType, md) + case *ast.OneOfNode: + oodIndex := len(msgd.OneofDecl) + ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(decl.Name.Val)} + r.putOneOfNode(ood, decl) + msgd.OneofDecl = append(msgd.OneofDecl, ood) + ooFields := 0 + for _, oodecl := range decl.Decls { + switch oodecl := oodecl.(type) { + case *ast.OptionNode: + if ood.Options == nil { + ood.Options = &descriptorpb.OneofOptions{} + } + ood.Options.UninterpretedOption = append(ood.Options.UninterpretedOption, r.asUninterpretedOption(oodecl)) + case *ast.FieldNode: + fd := r.asFieldDescriptor(oodecl, maxTag, isProto3, handler) + fd.OneofIndex = proto.Int32(int32(oodIndex)) + msgd.Field = append(msgd.Field, fd) + ooFields++ + case *ast.GroupNode: + fd, md := r.asGroupDescriptors(oodecl, isProto3, maxTag, handler, depth+1) + fd.OneofIndex = proto.Int32(int32(oodIndex)) + msgd.Field = append(msgd.Field, fd) + msgd.NestedType = append(msgd.NestedType, md) + ooFields++ + } + } + if ooFields == 0 { + declNodeInfo := r.file.NodeInfo(decl) + _ = handler.HandleErrorf(declNodeInfo.Start(), "oneof must contain at least one field") + } + case *ast.MessageNode: + msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl, isProto3, handler, depth+1)) + case *ast.ReservedNode: + for _, n := range decl.Names { + count := rsvdNames[n.AsString()] + if count == 1 { // already seen + nameNodeInfo := r.file.NodeInfo(n) + _ = handler.HandleErrorf(nameNodeInfo.Start(), "name %q is reserved multiple times", n.AsString()) + } + rsvdNames[n.AsString()] = count + 1 + msgd.ReservedName = append(msgd.ReservedName, n.AsString()) + } + for _, rng := range decl.Ranges { + msgd.ReservedRange = append(msgd.ReservedRange, r.asMessageReservedRange(rng, maxTag, handler)) + } + } + } + + if messageSetOpt != nil { + if len(msgd.Field) > 0 { + node := r.FieldNode(msgd.Field[0]) + nodeInfo := r.file.NodeInfo(node) + _ = handler.HandleErrorf(nodeInfo.Start(), "messages with message-set wire format cannot contain non-extension fields") + } + if len(msgd.ExtensionRange) == 0 { + node := r.OptionNode(messageSetOpt) + nodeInfo := r.file.NodeInfo(node) + _ = handler.HandleErrorf(nodeInfo.Start(), "messages with message-set wire format must contain at least one extension range") + } + } + + // process any proto3_optional fields + if isProto3 { + r.processProto3OptionalFields(msgd) + } +} + +func (r *result) isMessageSetWireFormat(scope string, md *descriptorpb.DescriptorProto, handler *reporter.Handler) (*descriptorpb.UninterpretedOption, error) { + uo := md.GetOptions().GetUninterpretedOption() + index, err := internal.FindOption(r, handler, scope, uo, "message_set_wire_format") + if err != nil { + return nil, err + } + if index == -1 { + // no such option + return nil, nil + } + + opt := uo[index] + + switch opt.GetIdentifierValue() { + case "true": + return opt, nil + case "false": + return nil, nil + default: + optNode := r.OptionNode(opt) + optNodeInfo := r.file.NodeInfo(optNode.GetValue()) + return nil, handler.HandleErrorf(optNodeInfo.Start(), "%s: expecting bool 
value for message_set_wire_format option", scope) + } +} + +func (r *result) asMessageReservedRange(rng *ast.RangeNode, maxTag int32, handler *reporter.Handler) *descriptorpb.DescriptorProto_ReservedRange { + start, end := r.getRangeBounds(rng, 1, maxTag, handler) + rr := &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(start), + End: proto.Int32(end + 1), + } + r.putMessageReservedRangeNode(rr, rng) + return rr +} + +func (r *result) getRangeBounds(rng *ast.RangeNode, minVal, maxVal int32, handler *reporter.Handler) (int32, int32) { + checkOrder := true + start, ok := rng.StartValueAsInt32(minVal, maxVal) + if !ok { + checkOrder = false + startValNodeInfo := r.file.NodeInfo(rng.StartVal) + _ = handler.HandleErrorf(startValNodeInfo.Start(), "range start %d is out of range: should be between %d and %d", rng.StartValue(), minVal, maxVal) + } + + end, ok := rng.EndValueAsInt32(minVal, maxVal) + if !ok { + checkOrder = false + if rng.EndVal != nil { + endValNodeInfo := r.file.NodeInfo(rng.EndVal) + _ = handler.HandleErrorf(endValNodeInfo.Start(), "range end %d is out of range: should be between %d and %d", rng.EndValue(), minVal, maxVal) + } + } + + if checkOrder && start > end { + rangeStartNodeInfo := r.file.NodeInfo(rng.RangeStart()) + _ = handler.HandleErrorf(rangeStartNodeInfo.Start(), "range, %d to %d, is invalid: start must be <= end", start, end) + } + + return start, end +} + +func (r *result) asServiceDescriptor(svc *ast.ServiceNode) *descriptorpb.ServiceDescriptorProto { + sd := &descriptorpb.ServiceDescriptorProto{Name: proto.String(svc.Name.Val)} + r.putServiceNode(sd, svc) + for _, decl := range svc.Decls { + switch decl := decl.(type) { + case *ast.OptionNode: + if sd.Options == nil { + sd.Options = &descriptorpb.ServiceOptions{} + } + sd.Options.UninterpretedOption = append(sd.Options.UninterpretedOption, r.asUninterpretedOption(decl)) + case *ast.RPCNode: + sd.Method = append(sd.Method, r.asMethodDescriptor(decl)) + } + } + return sd +} + +func (r *result) checkTag(n ast.Node, v uint64, maxTag int32) error { + switch { + case v < 1: + return reporter.Errorf(r.file.NodeInfo(n).Start(), "tag number %d must be greater than zero", v) + case v > uint64(maxTag): + return reporter.Errorf(r.file.NodeInfo(n).Start(), "tag number %d is higher than max allowed tag number (%d)", v, maxTag) + case v >= internal.SpecialReservedStart && v <= internal.SpecialReservedEnd: + return reporter.Errorf(r.file.NodeInfo(n).Start(), "tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd) + default: + return nil + } +} + +// processProto3OptionalFields adds synthetic oneofs to the given message descriptor +// for each proto3 optional field. It also updates the fields to have the correct +// oneof index reference. +func (r *result) processProto3OptionalFields(msgd *descriptorpb.DescriptorProto) { + // add synthetic oneofs to the given message descriptor for each proto3 + // optional field, and update each field to have correct oneof index + var allNames map[string]struct{} + for _, fd := range msgd.Field { + if fd.GetProto3Optional() { + // lazy init the set of all names + if allNames == nil { + allNames = map[string]struct{}{} + for _, fd := range msgd.Field { + allNames[fd.GetName()] = struct{}{} + } + for _, od := range msgd.OneofDecl { + allNames[od.GetName()] = struct{}{} + } + // NB: protoc only considers names of other fields and oneofs + // when computing the synthetic oneof name. 
But that feels like + // a bug, since it means it could generate a name that conflicts + // with some other symbol defined in the message. If it's decided + // that's NOT a bug and is desirable, then we should remove the + // following four loops to mimic protoc's behavior. + for _, fd := range msgd.Extension { + allNames[fd.GetName()] = struct{}{} + } + for _, ed := range msgd.EnumType { + allNames[ed.GetName()] = struct{}{} + for _, evd := range ed.Value { + allNames[evd.GetName()] = struct{}{} + } + } + for _, fd := range msgd.NestedType { + allNames[fd.GetName()] = struct{}{} + } + } + + // Compute a name for the synthetic oneof. This uses the same + // algorithm as used in protoc: + // https://github.com/protocolbuffers/protobuf/blob/74ad62759e0a9b5a21094f3fb9bb4ebfaa0d1ab8/src/google/protobuf/compiler/parser.cc#L785-L803 + ooName := fd.GetName() + if !strings.HasPrefix(ooName, "_") { + ooName = "_" + ooName + } + for { + _, ok := allNames[ooName] + if !ok { + // found a unique name + allNames[ooName] = struct{}{} + break + } + ooName = "X" + ooName + } + + fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl))) + ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)} + msgd.OneofDecl = append(msgd.OneofDecl, ood) + ooident := r.FieldNode(fd).(*ast.FieldNode) //nolint:errcheck + r.putOneOfNode(ood, ast.NewSyntheticOneOf(ooident)) + } + } +} + +func (r *result) Node(m proto.Message) ast.Node { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[m] +} + +func (r *result) FileNode() ast.FileDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[r.proto].(ast.FileDeclNode) +} + +func (r *result) OptionNode(o *descriptorpb.UninterpretedOption) ast.OptionDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[o].(ast.OptionDeclNode) +} + +func (r *result) OptionNamePartNode(o *descriptorpb.UninterpretedOption_NamePart) ast.Node { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[o] +} + +func (r *result) MessageNode(m *descriptorpb.DescriptorProto) ast.MessageDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[m].(ast.MessageDeclNode) +} + +func (r *result) FieldNode(f *descriptorpb.FieldDescriptorProto) ast.FieldDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[f].(ast.FieldDeclNode) +} + +func (r *result) OneOfNode(o *descriptorpb.OneofDescriptorProto) ast.Node { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[o] +} + +func (r *result) ExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[e].(ast.RangeDeclNode) +} + +func (r *result) MessageReservedRangeNode(rr *descriptorpb.DescriptorProto_ReservedRange) ast.RangeDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[rr].(ast.RangeDeclNode) +} + +func (r *result) EnumNode(e *descriptorpb.EnumDescriptorProto) ast.Node { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[e] +} + +func (r *result) EnumValueNode(e *descriptorpb.EnumValueDescriptorProto) ast.EnumValueDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[e].(ast.EnumValueDeclNode) +} + +func (r 
*result) EnumReservedRangeNode(rr *descriptorpb.EnumDescriptorProto_EnumReservedRange) ast.RangeDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[rr].(ast.RangeDeclNode) +} + +func (r *result) ServiceNode(s *descriptorpb.ServiceDescriptorProto) ast.Node { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[s] +} + +func (r *result) MethodNode(m *descriptorpb.MethodDescriptorProto) ast.RPCDeclNode { + if r.nodes == nil { + return ast.NewNoSourceNode(r.proto.GetName()) + } + return r.nodes[m].(ast.RPCDeclNode) +} + +func (r *result) putFileNode(f *descriptorpb.FileDescriptorProto, n *ast.FileNode) { + r.nodes[f] = n +} + +func (r *result) putOptionNode(o *descriptorpb.UninterpretedOption, n *ast.OptionNode) { + r.nodes[o] = n +} + +func (r *result) putOptionNamePartNode(o *descriptorpb.UninterpretedOption_NamePart, n *ast.FieldReferenceNode) { + r.nodes[o] = n +} + +func (r *result) putMessageNode(m *descriptorpb.DescriptorProto, n ast.MessageDeclNode) { + r.nodes[m] = n +} + +func (r *result) putFieldNode(f *descriptorpb.FieldDescriptorProto, n ast.FieldDeclNode) { + r.nodes[f] = n +} + +func (r *result) putOneOfNode(o *descriptorpb.OneofDescriptorProto, n ast.OneOfDeclNode) { + r.nodes[o] = n +} + +func (r *result) putExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange, n *ast.RangeNode) { + r.nodes[e] = n +} + +func (r *result) putMessageReservedRangeNode(rr *descriptorpb.DescriptorProto_ReservedRange, n *ast.RangeNode) { + r.nodes[rr] = n +} + +func (r *result) putEnumNode(e *descriptorpb.EnumDescriptorProto, n *ast.EnumNode) { + r.nodes[e] = n +} + +func (r *result) putEnumValueNode(e *descriptorpb.EnumValueDescriptorProto, n *ast.EnumValueNode) { + r.nodes[e] = n +} + +func (r *result) putEnumReservedRangeNode(rr *descriptorpb.EnumDescriptorProto_EnumReservedRange, n *ast.RangeNode) { + r.nodes[rr] = n +} + +func (r *result) putServiceNode(s *descriptorpb.ServiceDescriptorProto, n *ast.ServiceNode) { + r.nodes[s] = n +} + +func (r *result) putMethodNode(m *descriptorpb.MethodDescriptorProto, n *ast.RPCNode) { + r.nodes[m] = n +} + +// NB: If we ever add other put*Node methods, to index other kinds of elements in the descriptor +// proto hierarchy, we need to update the index recreation logic in clone.go, too. diff --git a/vendor/github.com/bufbuild/protocompile/parser/validate.go b/vendor/github.com/bufbuild/protocompile/parser/validate.go new file mode 100644 index 00000000..494a00ef --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/validate.go @@ -0,0 +1,499 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package parser + +import ( + "fmt" + "sort" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/walk" +) + +func validateBasic(res *result, handler *reporter.Handler) { + fd := res.proto + isProto3 := fd.GetSyntax() == "proto3" + + if err := validateImports(res, handler); err != nil { + return + } + + _ = walk.DescriptorProtos(fd, + func(name protoreflect.FullName, d proto.Message) error { + switch d := d.(type) { + case *descriptorpb.DescriptorProto: + if err := validateMessage(res, isProto3, name, d, handler); err != nil { + // exit func is not called when enter returns error + return err + } + case *descriptorpb.EnumDescriptorProto: + if err := validateEnum(res, isProto3, name, d, handler); err != nil { + return err + } + case *descriptorpb.FieldDescriptorProto: + if err := validateField(res, isProto3, name, d, handler); err != nil { + return err + } + } + return nil + }) +} + +func validateImports(res *result, handler *reporter.Handler) error { + fileNode := res.file + if fileNode == nil { + return nil + } + imports := make(map[string]ast.SourcePos) + for _, decl := range fileNode.Decls { + imp, ok := decl.(*ast.ImportNode) + if !ok { + continue + } + startPos := fileNode.NodeInfo(decl).Start() + name := imp.Name.AsString() + if prev, ok := imports[name]; ok { + return handler.HandleErrorf(startPos, "%q was already imported at %v", name, prev) + } + imports[name] = startPos + } + return nil +} + +func validateMessage(res *result, isProto3 bool, name protoreflect.FullName, md *descriptorpb.DescriptorProto, handler *reporter.Handler) error { + scope := fmt.Sprintf("message %s", name) + + if isProto3 && len(md.ExtensionRange) > 0 { + n := res.ExtensionRangeNode(md.ExtensionRange[0]) + nInfo := res.file.NodeInfo(n) + if err := handler.HandleErrorf(nInfo.Start(), "%s: extension ranges are not allowed in proto3", scope); err != nil { + return err + } + } + + if index, err := internal.FindOption(res, handler, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil { + return err + } else if index >= 0 { + opt := md.Options.UninterpretedOption[index] + optn := res.OptionNode(opt) + md.Options.UninterpretedOption = internal.RemoveOption(md.Options.UninterpretedOption, index) + valid := false + if opt.IdentifierValue != nil { + if opt.GetIdentifierValue() == "true" { + valid = true + optionNodeInfo := res.file.NodeInfo(optn.GetValue()) + if err := handler.HandleErrorf(optionNodeInfo.Start(), "%s: map_entry option should not be set explicitly; use map type instead", scope); err != nil { + return err + } + } else if opt.GetIdentifierValue() == "false" { + valid = true + md.Options.MapEntry = proto.Bool(false) + } + } + if !valid { + optionNodeInfo := res.file.NodeInfo(optn.GetValue()) + if err := handler.HandleErrorf(optionNodeInfo.Start(), "%s: expecting bool value for map_entry option", scope); err != nil { + return err + } + } + } + + // reserved ranges should not overlap + rsvd := make(tagRanges, len(md.ReservedRange)) + for i, r := range md.ReservedRange { + n := res.MessageReservedRangeNode(r) + rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n} + } + sort.Sort(rsvd) + for i := 1; i < len(rsvd); i++ { + if rsvd[i].start < rsvd[i-1].end { + rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) + if err := 
handler.HandleErrorf(rangeNodeInfo.Start(), "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { + return err + } + } + } + + // extensions ranges should not overlap + exts := make(tagRanges, len(md.ExtensionRange)) + for i, r := range md.ExtensionRange { + n := res.ExtensionRangeNode(r) + exts[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n} + } + sort.Sort(exts) + for i := 1; i < len(exts); i++ { + if exts[i].start < exts[i-1].end { + rangeNodeInfo := res.file.NodeInfo(exts[i].node) + if err := handler.HandleErrorf(rangeNodeInfo.Start(), "%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1); err != nil { + return err + } + } + } + + // see if any extension range overlaps any reserved range + var i, j int // i indexes rsvd; j indexes exts + for i < len(rsvd) && j < len(exts) { + if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end || + exts[j].start >= rsvd[i].start && exts[j].start < rsvd[i].end { + var pos ast.SourcePos + if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end { + rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) + pos = rangeNodeInfo.Start() + } else { + rangeNodeInfo := res.file.NodeInfo(exts[j].node) + pos = rangeNodeInfo.Start() + } + // ranges overlap + if err := handler.HandleErrorf(pos, "%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { + return err + } + } + if rsvd[i].start < exts[j].start { + i++ + } else { + j++ + } + } + + // now, check that fields don't re-use tags and don't try to use extension + // or reserved ranges or reserved names + rsvdNames := map[string]struct{}{} + for _, n := range md.ReservedName { + // validate reserved name while we're here + if !isIdentifier(n) { + node := findMessageReservedNameNode(res.MessageNode(md), n) + nodeInfo := res.file.NodeInfo(node) + if err := handler.HandleErrorf(nodeInfo.Start(), "%s: reserved name %q is not a valid identifier", scope, n); err != nil { + return err + } + } + rsvdNames[n] = struct{}{} + } + fieldTags := map[int32]string{} + for _, fld := range md.Field { + fn := res.FieldNode(fld) + if _, ok := rsvdNames[fld.GetName()]; ok { + fieldNameNodeInfo := res.file.NodeInfo(fn.FieldName()) + if err := handler.HandleErrorf(fieldNameNodeInfo.Start(), "%s: field %s is using a reserved name", scope, fld.GetName()); err != nil { + return err + } + } + if existing := fieldTags[fld.GetNumber()]; existing != "" { + fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) + if err := handler.HandleErrorf(fieldTagNodeInfo.Start(), "%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber()); err != nil { + return err + } + } + fieldTags[fld.GetNumber()] = fld.GetName() + // check reserved ranges + r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end > fld.GetNumber() }) + if r < len(rsvd) && rsvd[r].start <= fld.GetNumber() { + fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) + if err := handler.HandleErrorf(fieldTagNodeInfo.Start(), "%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1); err != nil { + return err + } + } + // and check extension ranges + e := sort.Search(len(exts), func(index int) bool { return exts[index].end > fld.GetNumber() }) + if e < len(exts) && exts[e].start <= 
fld.GetNumber() { + fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) + if err := handler.HandleErrorf(fieldTagNodeInfo.Start(), "%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1); err != nil { + return err + } + } + } + + return nil +} + +func isIdentifier(s string) bool { + if len(s) == 0 { + return false + } + for i, r := range s { + if i == 0 && r >= '0' && r <= '9' { + // can't start with number + return false + } + // alphanumeric and underscore ok; everything else bad + switch { + case r >= '0' && r <= '9': + case r >= 'a' && r <= 'z': + case r >= 'A' && r <= 'Z': + case r == '_': + default: + return false + } + } + return true +} + +func findMessageReservedNameNode(msgNode ast.MessageDeclNode, name string) ast.Node { + var decls []ast.MessageElement + switch msgNode := msgNode.(type) { + case *ast.MessageNode: + decls = msgNode.Decls + case *ast.GroupNode: + decls = msgNode.Decls + default: + // leave decls empty + } + return findReservedNameNode(msgNode, decls, name) +} + +func findReservedNameNode[T ast.Node](parent ast.Node, decls []T, name string) ast.Node { + for _, decl := range decls { + // NB: We have to convert to empty interface first, before we can do a type + // assertion because type assertions on type parameters aren't allowed. (The + // compiler cannot yet know whether T is an interface type or not.) + rsvd, ok := any(decl).(*ast.ReservedNode) + if !ok { + continue + } + for _, rsvdName := range rsvd.Names { + if rsvdName.AsString() == name { + return rsvdName + } + } + } + // couldn't find it? Instead of puking, report position of the parent. + return parent +} + +func validateEnum(res *result, isProto3 bool, name protoreflect.FullName, ed *descriptorpb.EnumDescriptorProto, handler *reporter.Handler) error { + scope := fmt.Sprintf("enum %s", name) + + if len(ed.Value) == 0 { + enNode := res.EnumNode(ed) + enNodeInfo := res.file.NodeInfo(enNode) + if err := handler.HandleErrorf(enNodeInfo.Start(), "%s: enums must define at least one value", scope); err != nil { + return err + } + } + + allowAlias := false + var allowAliasOpt *descriptorpb.UninterpretedOption + if index, err := internal.FindOption(res, handler, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil { + return err + } else if index >= 0 { + allowAliasOpt = ed.Options.UninterpretedOption[index] + valid := false + if allowAliasOpt.IdentifierValue != nil { + if allowAliasOpt.GetIdentifierValue() == "true" { + allowAlias = true + valid = true + } else if allowAliasOpt.GetIdentifierValue() == "false" { + valid = true + } + } + if !valid { + optNode := res.OptionNode(allowAliasOpt) + optNodeInfo := res.file.NodeInfo(optNode.GetValue()) + if err := handler.HandleErrorf(optNodeInfo.Start(), "%s: expecting bool value for allow_alias option", scope); err != nil { + return err + } + } + } + + if isProto3 && len(ed.Value) > 0 && ed.Value[0].GetNumber() != 0 { + evNode := res.EnumValueNode(ed.Value[0]) + evNodeInfo := res.file.NodeInfo(evNode.GetNumber()) + if err := handler.HandleErrorf(evNodeInfo.Start(), "%s: proto3 requires that first value in enum have numeric value of 0", scope); err != nil { + return err + } + } + + // check for aliases + vals := map[int32]string{} + hasAlias := false + for _, evd := range ed.Value { + existing := vals[evd.GetNumber()] + if existing != "" { + if allowAlias { + hasAlias = true + } else { + evNode := res.EnumValueNode(evd) + evNodeInfo := 
res.file.NodeInfo(evNode.GetNumber()) + if err := handler.HandleErrorf(evNodeInfo.Start(), "%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber()); err != nil { + return err + } + } + } + vals[evd.GetNumber()] = evd.GetName() + } + if allowAlias && !hasAlias { + optNode := res.OptionNode(allowAliasOpt) + optNodeInfo := res.file.NodeInfo(optNode.GetValue()) + if err := handler.HandleErrorf(optNodeInfo.Start(), "%s: allow_alias is true but no values are aliases", scope); err != nil { + return err + } + } + + // reserved ranges should not overlap + rsvd := make(tagRanges, len(ed.ReservedRange)) + for i, r := range ed.ReservedRange { + n := res.EnumReservedRangeNode(r) + rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n} + } + sort.Sort(rsvd) + for i := 1; i < len(rsvd); i++ { + if rsvd[i].start <= rsvd[i-1].end { + rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) + if err := handler.HandleErrorf(rangeNodeInfo.Start(), "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end); err != nil { + return err + } + } + } + + // now, check that fields don't re-use tags and don't try to use extension + // or reserved ranges or reserved names + rsvdNames := map[string]struct{}{} + for _, n := range ed.ReservedName { + // validate reserved name while we're here + if !isIdentifier(n) { + node := findEnumReservedNameNode(res.EnumNode(ed), n) + nodeInfo := res.file.NodeInfo(node) + if err := handler.HandleErrorf(nodeInfo.Start(), "%s: reserved name %q is not a valid identifier", scope, n); err != nil { + return err + } + } + rsvdNames[n] = struct{}{} + } + for _, ev := range ed.Value { + evn := res.EnumValueNode(ev) + if _, ok := rsvdNames[ev.GetName()]; ok { + enumValNodeInfo := res.file.NodeInfo(evn.GetName()) + if err := handler.HandleErrorf(enumValNodeInfo.Start(), "%s: value %s is using a reserved name", scope, ev.GetName()); err != nil { + return err + } + } + // check reserved ranges + r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end >= ev.GetNumber() }) + if r < len(rsvd) && rsvd[r].start <= ev.GetNumber() { + enumValNodeInfo := res.file.NodeInfo(evn.GetNumber()) + if err := handler.HandleErrorf(enumValNodeInfo.Start(), "%s: value %s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end); err != nil { + return err + } + } + } + + return nil +} + +func findEnumReservedNameNode(enumNode ast.Node, name string) ast.Node { + var decls []ast.EnumElement + if enumNode, ok := enumNode.(*ast.EnumNode); ok { + decls = enumNode.Decls + // if not the right type, we leave decls empty + } + return findReservedNameNode(enumNode, decls, name) +} + +func validateField(res *result, isProto3 bool, name protoreflect.FullName, fld *descriptorpb.FieldDescriptorProto, handler *reporter.Handler) error { + scope := fmt.Sprintf("field %s", name) + + node := res.FieldNode(fld) + if isProto3 { + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + groupNodeInfo := res.file.NodeInfo(node.GetGroupKeyword()) + if err := handler.HandleErrorf(groupNodeInfo.Start(), "%s: groups are not allowed in proto3", scope); err != nil { + return err + } + } else if fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) + if err := 
handler.HandleErrorf(fieldLabelNodeInfo.Start(), "%s: label 'required' is not allowed in proto3", scope); err != nil { + return err + } + } + if index, err := internal.FindOption(res, handler, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil { + return err + } else if index >= 0 { + optNode := res.OptionNode(fld.Options.GetUninterpretedOption()[index]) + optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) + if err := handler.HandleErrorf(optNameNodeInfo.Start(), "%s: default values are not allowed in proto3", scope); err != nil { + return err + } + } + } else { + if fld.Label == nil && fld.OneofIndex == nil { + fieldNameNodeInfo := res.file.NodeInfo(node.FieldName()) + if err := handler.HandleErrorf(fieldNameNodeInfo.Start(), "%s: field has no label; proto2 requires explicit 'optional' label", scope); err != nil { + return err + } + } + if fld.GetExtendee() != "" && fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) + if err := handler.HandleErrorf(fieldLabelNodeInfo.Start(), "%s: extension fields cannot be 'required'", scope); err != nil { + return err + } + } + } + + return nil +} + +type tagRange struct { + start int32 + end int32 + node ast.RangeDeclNode +} + +type tagRanges []tagRange + +func (r tagRanges) Len() int { + return len(r) +} + +func (r tagRanges) Less(i, j int) bool { + return r[i].start < r[j].start || + (r[i].start == r[j].start && r[i].end < r[j].end) +} + +func (r tagRanges) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func fillInMissingLabels(fd *descriptorpb.FileDescriptorProto) { + for _, md := range fd.MessageType { + fillInMissingLabelsInMsg(md) + } + for _, extd := range fd.Extension { + fillInMissingLabel(extd) + } +} + +func fillInMissingLabelsInMsg(md *descriptorpb.DescriptorProto) { + for _, fld := range md.Field { + fillInMissingLabel(fld) + } + for _, nmd := range md.NestedType { + fillInMissingLabelsInMsg(nmd) + } + for _, extd := range md.Extension { + fillInMissingLabel(extd) + } +} + +func fillInMissingLabel(fld *descriptorpb.FieldDescriptorProto) { + if fld.Label == nil { + fld.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } +} diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/protos.go b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go new file mode 100644 index 00000000..ad804426 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go @@ -0,0 +1,261 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package protoutil contains useful functions for interacting with descriptors. +// For now these include only functions for efficiently converting descriptors +// produced by the compiler to descriptor protos. +// +// Despite the fact that descriptor protos are mutable, calling code should NOT +// mutate any of the protos returned from this package. 
For efficiency, some
+// protos returned from this package may be part of internal state of a compiler
+// result, and mutating the proto could corrupt or invalidate parts of that
+// result.
+package protoutil
+
+import (
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+// DescriptorProtoWrapper is a protoreflect.Descriptor that wraps an
+// underlying descriptor proto. It provides the same interface as
+// Descriptor but with one extra operation, to efficiently query for
+// the underlying descriptor proto.
+//
+// Descriptors that implement this will also implement another method
+// whose specified return type is the concrete type returned by the
+// AsProto method. The name of this method varies by the type of this
+// descriptor:
+//
+//	 Descriptor Type       | Other Method Name
+//	---------------------+------------------------------------
+//	 FileDescriptor        | FileDescriptorProto()
+//	 MessageDescriptor     | MessageDescriptorProto()
+//	 FieldDescriptor       | FieldDescriptorProto()
+//	 OneofDescriptor       | OneOfDescriptorProto()
+//	 EnumDescriptor        | EnumDescriptorProto()
+//	 EnumValueDescriptor   | EnumValueDescriptorProto()
+//	 ServiceDescriptor     | ServiceDescriptorProto()
+//	 MethodDescriptor      | MethodDescriptorProto()
+//
+// For example, a DescriptorProtoWrapper that implements FileDescriptor
+// returns a *descriptorpb.FileDescriptorProto value from its AsProto
+// method and also provides a method with the following signature:
+//
+//	FileDescriptorProto() *descriptorpb.FileDescriptorProto
+type DescriptorProtoWrapper interface {
+	protoreflect.Descriptor
+	// AsProto returns the underlying descriptor proto. The concrete
+	// type of the proto message depends on the type of this
+	// descriptor:
+	//	 Descriptor Type       | Proto Message Type
+	//	---------------------+------------------------------------
+	//	 FileDescriptor        | *descriptorpb.FileDescriptorProto
+	//	 MessageDescriptor     | *descriptorpb.DescriptorProto
+	//	 FieldDescriptor       | *descriptorpb.FieldDescriptorProto
+	//	 OneofDescriptor       | *descriptorpb.OneofDescriptorProto
+	//	 EnumDescriptor        | *descriptorpb.EnumDescriptorProto
+	//	 EnumValueDescriptor   | *descriptorpb.EnumValueDescriptorProto
+	//	 ServiceDescriptor     | *descriptorpb.ServiceDescriptorProto
+	//	 MethodDescriptor      | *descriptorpb.MethodDescriptorProto
+	AsProto() proto.Message
+}
+
+// ProtoFromDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return ProtoFromFileDescriptor(d)
+	case protoreflect.MessageDescriptor:
+		return ProtoFromMessageDescriptor(d)
+	case protoreflect.FieldDescriptor:
+		return ProtoFromFieldDescriptor(d)
+	case protoreflect.OneofDescriptor:
+		return ProtoFromOneofDescriptor(d)
+	case protoreflect.EnumDescriptor:
+		return ProtoFromEnumDescriptor(d)
+	case protoreflect.EnumValueDescriptor:
+		return ProtoFromEnumValueDescriptor(d)
+	case protoreflect.ServiceDescriptor:
+		return ProtoFromServiceDescriptor(d)
+	case protoreflect.MethodDescriptor:
+		return ProtoFromMethodDescriptor(d)
+	default:
+		// Unknown descriptor type: fall back to the generic wrapper
+		// interface, if implemented.
+ if res, ok := d.(DescriptorProtoWrapper); ok { + return res.AsProto() + } + return nil + } +} + +// ProtoFromFileDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For file descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. File descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + if imp, ok := d.(protoreflect.FileImport); ok { + d = imp.FileDescriptor + } + type canProto interface { + FileDescriptorProto() *descriptorpb.FileDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.FileDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok { + return fd + } + } + return protodesc.ToFileDescriptorProto(d) +} + +// ProtoFromMessageDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For message descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Message descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromMessageDescriptor(d protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + type canProto interface { + MessageDescriptorProto() *descriptorpb.DescriptorProto + } + if res, ok := d.(canProto); ok { + return res.MessageDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if md, ok := res.AsProto().(*descriptorpb.DescriptorProto); ok { + return md + } + } + return protodesc.ToDescriptorProto(d) +} + +// ProtoFromFieldDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For field descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Field descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromFieldDescriptor(d protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + type canProto interface { + FieldDescriptorProto() *descriptorpb.FieldDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.FieldDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if fd, ok := res.AsProto().(*descriptorpb.FieldDescriptorProto); ok { + return fd + } + } + return protodesc.ToFieldDescriptorProto(d) +} + +// ProtoFromOneofDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For oneof descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Oneof descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromOneofDescriptor(d protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + type canProto interface { + OneofDescriptorProto() *descriptorpb.OneofDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.OneofDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if ood, ok := res.AsProto().(*descriptorpb.OneofDescriptorProto); ok { + return ood + } + } + return protodesc.ToOneofDescriptorProto(d) +} + +// ProtoFromEnumDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For enum descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Enum descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. 
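+//
+// A minimal usage sketch (editor's note, not part of the upstream file): the
+// variable ed below stands in for any protoreflect.EnumDescriptor, however it
+// was obtained; GetName and GetValue are the standard descriptorpb accessors.
+//
+//	edp := ProtoFromEnumDescriptor(ed)
+//	fmt.Printf("enum %s has %d values\n", edp.GetName(), len(edp.GetValue()))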
+func ProtoFromEnumDescriptor(d protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + type canProto interface { + EnumDescriptorProto() *descriptorpb.EnumDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.EnumDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if ed, ok := res.AsProto().(*descriptorpb.EnumDescriptorProto); ok { + return ed + } + } + return protodesc.ToEnumDescriptorProto(d) +} + +// ProtoFromEnumValueDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For enum value descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Enum value descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromEnumValueDescriptor(d protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + type canProto interface { + EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.EnumValueDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if ed, ok := res.AsProto().(*descriptorpb.EnumValueDescriptorProto); ok { + return ed + } + } + return protodesc.ToEnumValueDescriptorProto(d) +} + +// ProtoFromServiceDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For service descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Service descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromServiceDescriptor(d protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + type canProto interface { + ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.ServiceDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if sd, ok := res.AsProto().(*descriptorpb.ServiceDescriptorProto); ok { + return sd + } + } + return protodesc.ToServiceDescriptorProto(d) +} + +// ProtoFromMethodDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For method descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Method descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + type canProto interface { + MethodDescriptorProto() *descriptorpb.MethodDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.MethodDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok { + return md + } + } + return protodesc.ToMethodDescriptorProto(d) +} diff --git a/vendor/github.com/bufbuild/protocompile/reporter/errors.go b/vendor/github.com/bufbuild/protocompile/reporter/errors.go new file mode 100644 index 00000000..2932c79f --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/reporter/errors.go @@ -0,0 +1,68 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reporter
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/bufbuild/protocompile/ast"
+)
+
+// ErrInvalidSource is a sentinel error that is returned by compilation and
+// stand-alone compilation steps (such as parsing, linking) when one or more
+// errors are reported but the configured ErrorReporter always returns nil.
+var ErrInvalidSource = errors.New("parse failed: invalid proto source")
+
+// ErrorWithPos is an error about a proto source file that adds information
+// about the location in the file that caused the error.
+type ErrorWithPos interface {
+	error
+	// GetPosition returns the source position that caused the underlying error.
+	GetPosition() ast.SourcePos
+	// Unwrap returns the underlying error.
+	Unwrap() error
+}
+
+// Error creates a new ErrorWithPos from the given error and source position.
+func Error(pos ast.SourcePos, err error) ErrorWithPos {
+	return errorWithSourcePos{pos: pos, underlying: err}
+}
+
+// Errorf creates a new ErrorWithPos whose underlying error is created using the
+// given message format and arguments (via fmt.Errorf).
+func Errorf(pos ast.SourcePos, format string, args ...interface{}) ErrorWithPos {
+	return errorWithSourcePos{pos: pos, underlying: fmt.Errorf(format, args...)}
+}
+
+type errorWithSourcePos struct {
+	underlying error
+	pos        ast.SourcePos
+}
+
+func (e errorWithSourcePos) Error() string {
+	sourcePos := e.GetPosition()
+	return fmt.Sprintf("%s: %v", sourcePos, e.underlying)
+}
+
+func (e errorWithSourcePos) GetPosition() ast.SourcePos {
+	return e.pos
+}
+
+func (e errorWithSourcePos) Unwrap() error {
+	return e.underlying
+}
+
+var _ ErrorWithPos = errorWithSourcePos{}
diff --git a/vendor/github.com/bufbuild/protocompile/reporter/reporter.go b/vendor/github.com/bufbuild/protocompile/reporter/reporter.go
new file mode 100644
index 00000000..d3a31686
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/reporter/reporter.go
@@ -0,0 +1,232 @@
+// Copyright 2020-2022 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package reporter contains the types used for reporting errors from
+// protocompile operations. It contains error types as well as interfaces
+// for reporting and handling errors and warnings.
+package reporter
+
+import (
+	"sync"
+
+	"github.com/bufbuild/protocompile/ast"
+)
+
+// ErrorReporter is responsible for reporting the given error. If the reporter
+// returns a non-nil error, parsing/linking will abort with that error. If the
+// reporter returns nil, parsing will continue, allowing the parser to try to
+// report as many syntax and/or link errors as it can find.
+type ErrorReporter func(err ErrorWithPos) error
+
+// WarningReporter is responsible for reporting the given warning. This is used
+// for indicating non-error messages to the calling program for things that do
+// not cause the parse to fail but are considered bad practice. Though they are
+// just warnings, the details are supplied to the reporter via an error type.
+type WarningReporter func(ErrorWithPos)
+
+// Reporter is a type that handles reporting both errors and warnings.
+// A reporter does not need to be thread-safe. Safe concurrent access is
+// managed by a Handler.
+type Reporter interface {
+	// Error is called when the given error is encountered and needs to be
+	// reported to the calling program. This signature matches ErrorReporter
+	// because it has the same semantics. If this function returns non-nil
+	// then the operation will abort immediately with the given error. But
+	// if it returns nil, the operation will continue, reporting more errors
+	// as they are encountered. If the reporter never returns non-nil then
+	// the operation will eventually fail with ErrInvalidSource.
+	Error(ErrorWithPos) error
+	// Warning is called when the given warning is encountered and needs to be
+	// reported to the calling program. Despite the argument being an error
+	// type, a warning will never cause the operation to abort or fail (unless
+	// the reporter's implementation of this method panics).
+	Warning(ErrorWithPos)
+}
+
+// NewReporter creates a new reporter that invokes the given functions on error
+// or warning.
+func NewReporter(errs ErrorReporter, warnings WarningReporter) Reporter {
+	return reporterFuncs{errs: errs, warnings: warnings}
+}
+
+type reporterFuncs struct {
+	errs     ErrorReporter
+	warnings WarningReporter
+}
+
+func (r reporterFuncs) Error(err ErrorWithPos) error {
+	if r.errs == nil {
+		return err
+	}
+	return r.errs(err)
+}
+
+func (r reporterFuncs) Warning(err ErrorWithPos) {
+	if r.warnings != nil {
+		r.warnings(err)
+	}
+}
+
+// Handler is used by protocompile operations for handling errors and warnings.
+// This type is thread-safe. It uses a mutex to serialize calls to its reporter
+// so that reporter instances do not have to be thread-safe (unless re-used
+// across multiple handlers).
+type Handler struct {
+	parent       *Handler
+	mu           sync.Mutex
+	reporter     Reporter
+	errsReported bool
+	err          error
+}
+
+// NewHandler creates a new Handler that reports errors and warnings using the
+// given reporter.
+func NewHandler(rep Reporter) *Handler {
+	if rep == nil {
+		rep = NewReporter(nil, nil)
+	}
+	return &Handler{reporter: rep}
+}
+
+// SubHandler returns a "child" of h. Use of a child handler is the same as use
+// of the parent, except that the Error() and ReporterError() functions only
+// report non-nil for errors that were reported using the child handler. So
+// errors reported directly to the parent or to a different child handler won't
+// be returned. This is useful for making concurrent access to the handler more
+// deterministic: if a child handler is only used from one goroutine, its view
+// of reported errors is consistent and unimpacted by concurrent operations.
+func (h *Handler) SubHandler() *Handler {
+	return &Handler{parent: h}
+}
+
+// HandleError handles the given error.
If the given err is an ErrorWithPos, it +// is reported, and this function returns the error returned by the reporter. If +// the given err is NOT an ErrorWithPos, the current operation will abort +// immediately. +// +// If the handler has already aborted (by returning a non-nil error from a prior +// call to HandleError or HandleErrorf), that same error is returned and the +// given error is not reported. +func (h *Handler) HandleError(err error) error { + if h.parent != nil { + _, isErrWithPos := err.(ErrorWithPos) + err = h.parent.HandleError(err) + + // update child state + h.mu.Lock() + defer h.mu.Unlock() + if isErrWithPos { + h.errsReported = true + } + h.err = err + return err + } + + h.mu.Lock() + defer h.mu.Unlock() + + if h.err != nil { + return h.err + } + if ewp, ok := err.(ErrorWithPos); ok { + h.errsReported = true + err = h.reporter.Error(ewp) + } + h.err = err + return err +} + +// HandleErrorWithPos handles an error with the given source position. +// +// If the handler has already aborted (by returning a non-nil error from a prior +// call to HandleError or HandleErrorf), that same error is returned and the +// given error is not reported. +func (h *Handler) HandleErrorWithPos(pos ast.SourcePos, err error) error { + if ewp, ok := err.(ErrorWithPos); ok { + // replace existing position with given one + err = errorWithSourcePos{pos: pos, underlying: ewp.Unwrap()} + } else { + err = errorWithSourcePos{pos: pos, underlying: err} + } + return h.HandleError(err) +} + +// HandleErrorf handles an error with the given source position, creating the +// error using the given message format and arguments. +// +// If the handler has already aborted (by returning a non-nil error from a call +// to HandleError or HandleErrorf), that same error is returned and the given +// error is not reported. +func (h *Handler) HandleErrorf(pos ast.SourcePos, format string, args ...interface{}) error { + return h.HandleError(Errorf(pos, format, args...)) +} + +// HandleWarning handles the given warning. This will delegate to the handler's +// configured reporter. +func (h *Handler) HandleWarning(err ErrorWithPos) { + if h.parent != nil { + h.parent.HandleWarning(err) + return + } + + // even though we aren't touching mutable fields, we acquire lock anyway so + // that underlying reporter does not have to be thread-safe + h.mu.Lock() + defer h.mu.Unlock() + + h.reporter.Warning(err) +} + +// HandleWarningWithPos handles a warning with the given source position. This will +// delegate to the handler's configured reporter. +func (h *Handler) HandleWarningWithPos(pos ast.SourcePos, err error) { + ewp, ok := err.(ErrorWithPos) + if ok { + // replace existing position with given one + ewp = errorWithSourcePos{pos: pos, underlying: ewp.Unwrap()} + } else { + ewp = errorWithSourcePos{pos: pos, underlying: err} + } + h.HandleWarning(ewp) +} + +// HandleWarningf handles a warning with the given source position, creating the +// actual error value using the given message format and arguments. +func (h *Handler) HandleWarningf(pos ast.SourcePos, format string, args ...interface{}) { + h.HandleWarning(Errorf(pos, format, args...)) +} + +// Error returns the handler result. If any errors have been reported then this +// returns a non-nil error. If the reporter never returned a non-nil error then +// ErrInvalidSource is returned. Otherwise, this returns the error returned by +// the handler's reporter (the same value returned by ReporterError). 
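+//
+// A minimal sketch of the intended pattern (editor's note, not upstream code),
+// where pos stands in for some ast.SourcePos from the file being processed:
+//
+//	h := NewHandler(NewReporter(
+//		func(ErrorWithPos) error { return nil }, // keep going after each error
+//		nil,
+//	))
+//	_ = h.HandleErrorf(pos, "first problem")  // reporter returned nil, so this is nil
+//	_ = h.HandleErrorf(pos, "second problem") // ditto; processing continues
+//	err := h.Error() // ErrInvalidSource: errors were reported but none aborted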
+func (h *Handler) Error() error { + h.mu.Lock() + defer h.mu.Unlock() + + if h.errsReported && h.err == nil { + return ErrInvalidSource + } + return h.err +} + +// ReporterError returns the error returned by the handler's reporter. If +// the reporter has either not been invoked (no errors handled) or has not +// returned any non-nil value, then this returns nil. +func (h *Handler) ReporterError() error { + h.mu.Lock() + defer h.mu.Unlock() + + return h.err +} diff --git a/vendor/github.com/bufbuild/protocompile/resolver.go b/vendor/github.com/bufbuild/protocompile/resolver.go new file mode 100644 index 00000000..6838ef0f --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/resolver.go @@ -0,0 +1,194 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protocompile + +import ( + "errors" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/parser" +) + +// Resolver is used by the compiler to resolve a proto source file name +// into some unit that is usable by the compiler. The result could be source +// for a proto file or it could be an already-parsed AST or descriptor. +// +// Resolver implementations must be thread-safe as a single compilation +// operation could invoke FindFileByPath from multiple goroutines. +type Resolver interface { + // FindFileByPath searches for information for the given file path. If no + // result is available, it should return a non-nil error, such as + // protoregistry.NotFound. + FindFileByPath(path string) (SearchResult, error) +} + +// SearchResult represents information about a proto source file. Only one of +// the various fields must be set, based on what is available for a file. If +// multiple fields are set, the compiler prefers them in opposite order listed: +// so it uses a descriptor if present and only falls back to source if nothing +// else is available. +type SearchResult struct { + // Represents source code for the file. This should be nil if source code + // is not available. If no field below is set, then the compiler will parse + // the source code into an AST. + Source io.Reader + // Represents the abstract syntax tree for the file. If no field below is + // set, then the compiler will convert the AST into a descriptor proto. + AST *ast.FileNode + // A descriptor proto that represents the file. If the field below is not + // set, then the compiler will link this proto with its dependencies to + // produce a linked descriptor. + Proto *descriptorpb.FileDescriptorProto + // A parse result for the file. This packages both an AST and a descriptor + // proto in one. When a parser result is available, it is more efficient + // than using an AST search result, since the descriptor proto need not be + // re-created. 
And it provides better error messages than a descriptor proto + // search result, since the AST has greater fidelity with regard to source + // positions (even if the descriptor proto includes source code info). + ParseResult parser.Result + // A fully linked descriptor that represents the file. If this field is set, + // then the compiler has little or no additional work to do for this file as + // it is already compiled. If this value implements linker.File, there is no + // additional work. Otherwise, the additional work is to compute an index of + // symbols in the file, for efficient lookup. + Desc protoreflect.FileDescriptor +} + +// ResolverFunc is a simple function type that implements Resolver. +type ResolverFunc func(string) (SearchResult, error) + +var _ Resolver = ResolverFunc(nil) + +func (f ResolverFunc) FindFileByPath(path string) (SearchResult, error) { + return f(path) +} + +// CompositeResolver is a slice of resolvers, which are consulted in order +// until one can supply a result. If none of the constituent resolvers can +// supply a result, the error returned by the first resolver is returned. If +// the slice of resolvers is empty, all operations return +// protoregistry.NotFound. +type CompositeResolver []Resolver + +var _ Resolver = CompositeResolver(nil) + +func (f CompositeResolver) FindFileByPath(path string) (SearchResult, error) { + if len(f) == 0 { + return SearchResult{}, protoregistry.NotFound + } + var firstErr error + for _, res := range f { + r, err := res.FindFileByPath(path) + if err == nil { + return r, nil + } + if firstErr == nil { + firstErr = err + } + } + return SearchResult{}, firstErr +} + +// SourceResolver can resolve file names by returning source code. It uses +// an optional list of import paths to search. By default, it searches the +// file system. +type SourceResolver struct { + // Optional list of import paths. If present and not empty, then all + // file paths to find are assumed to be relative to one of these paths. + // If nil or empty, all file paths to find are assumed to be relative to + // the current working directory. + ImportPaths []string + // Optional function for returning a file's contents. If nil, then + // os.Open is used to open files on the file system. + // + // This function must be thread-safe as a single compilation operation + // could result in concurrent invocations of this function from + // multiple goroutines. + Accessor func(path string) (io.ReadCloser, error) +} + +var _ Resolver = (*SourceResolver)(nil) + +func (r *SourceResolver) FindFileByPath(path string) (SearchResult, error) { + if len(r.ImportPaths) == 0 { + reader, err := r.accessFile(path) + if err != nil { + return SearchResult{}, err + } + return SearchResult{Source: reader}, nil + } + + var e error + for _, importPath := range r.ImportPaths { + reader, err := r.accessFile(filepath.Join(importPath, path)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + e = err + continue + } + return SearchResult{}, err + } + return SearchResult{Source: reader}, nil + } + return SearchResult{}, e +} + +func (r *SourceResolver) accessFile(path string) (io.ReadCloser, error) { + if r.Accessor != nil { + return r.Accessor(path) + } + return os.Open(path) +} + +// SourceAccessorFromMap returns a function that can be used as the Accessor +// field of a SourceResolver that uses the given map to load source. The map +// keys are file names and the values are the corresponding file contents. +// +// The given map is used directly and not copied. 
Since accessor functions +// must be thread-safe, this means that the provided map must not be mutated +// once this accessor is provided to a compile operation. +func SourceAccessorFromMap(srcs map[string]string) func(string) (io.ReadCloser, error) { + return func(path string) (io.ReadCloser, error) { + src, ok := srcs[path] + if !ok { + return nil, os.ErrNotExist + } + return io.NopCloser(strings.NewReader(src)), nil + } +} + +// WithStandardImports returns a new resolver that knows about the same standard +// imports that are included with protoc. +func WithStandardImports(r Resolver) Resolver { + return ResolverFunc(func(name string) (SearchResult, error) { + res, err := r.FindFileByPath(name) + if err != nil { + // error from given resolver? see if it's a known standard file + if d, ok := standardImports[name]; ok { + return SearchResult{Desc: d}, nil + } + } + return res, err + }) +} diff --git a/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go b/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go new file mode 100644 index 00000000..6f32f28d --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go @@ -0,0 +1,814 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sourceinfo contains the logic for computing source code info for a +// file descriptor. +// +// The inputs to the computation are an AST for a file as well as the index of +// interpreted options for that file. +package sourceinfo + +import ( + "bytes" + "fmt" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/options" +) + +// GenerateSourceInfo generates source code info for the given AST. If the given +// opts is present, it can generate source code info for interpreted options. +// Otherwise, any options in the AST will get source code info as uninterpreted +// options. +// +// This includes comments only for locations that represent complete declarations. +// This is the same behavior as protoc, the reference compiler for Protocol Buffers. +func GenerateSourceInfo(file *ast.FileNode, opts options.Index) *descriptorpb.SourceCodeInfo { + return generateSourceInfo(file, opts, false) +} + +// GenerateSourceInfoWithExtraComments generates source code info for the given +// AST. If the given opts is present, it can generate source code info for +// interpreted options. Otherwise, any options in the AST will get source code +// info as uninterpreted options. +// +// This includes comments for all locations. This is still lossy, but less so as +// it preserves far more comments from the source file. 
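+//
+// Hypothetical usage (editor's sketch; fileNode, optsIndex, and fdProto are
+// assumed to come from earlier parse and option-interpretation steps):
+//
+//	info := GenerateSourceInfoWithExtraComments(fileNode, optsIndex)
+//	fdProto.SourceCodeInfo = info // attach to the file's descriptor proto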
+func GenerateSourceInfoWithExtraComments(file *ast.FileNode, opts options.Index) *descriptorpb.SourceCodeInfo { + return generateSourceInfo(file, opts, true) +} + +func generateSourceInfo(file *ast.FileNode, opts options.Index, extraComments bool) *descriptorpb.SourceCodeInfo { + if file == nil { + return nil + } + + sci := sourceCodeInfo{file: file, commentsUsed: map[ast.SourcePos]struct{}{}, extraComments: extraComments} + path := make([]int32, 0, 10) + + sci.newLocWithoutComments(file, nil) + + if file.Syntax != nil { + sci.newLocWithComments(file.Syntax, append(path, internal.FileSyntaxTag)) + } + + var depIndex, pubDepIndex, weakDepIndex, optIndex, msgIndex, enumIndex, extendIndex, svcIndex int32 + + for _, child := range file.Decls { + switch child := child.(type) { + case *ast.ImportNode: + sci.newLocWithComments(child, append(path, internal.FileDependencyTag, depIndex)) + depIndex++ + if child.Public != nil { + sci.newLoc(child.Public, append(path, internal.FilePublicDependencyTag, pubDepIndex)) + pubDepIndex++ + } else if child.Weak != nil { + sci.newLoc(child.Weak, append(path, internal.FileWeakDependencyTag, weakDepIndex)) + weakDepIndex++ + } + case *ast.PackageNode: + sci.newLocWithComments(child, append(path, internal.FilePackageTag)) + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, &sci, child, false, &optIndex, append(path, internal.FileOptionsTag)) + case *ast.MessageNode: + generateSourceCodeInfoForMessage(opts, &sci, child, nil, append(path, internal.FileMessagesTag, msgIndex)) + msgIndex++ + case *ast.EnumNode: + generateSourceCodeInfoForEnum(opts, &sci, child, append(path, internal.FileEnumsTag, enumIndex)) + enumIndex++ + case *ast.ExtendNode: + generateSourceCodeInfoForExtensions(opts, &sci, child, &extendIndex, &msgIndex, append(path, internal.FileExtensionsTag), append(dup(path), internal.FileMessagesTag)) + case *ast.ServiceNode: + generateSourceCodeInfoForService(opts, &sci, child, append(path, internal.FileServicesTag, svcIndex)) + svcIndex++ + } + } + + return &descriptorpb.SourceCodeInfo{Location: sci.locs} +} + +func generateSourceCodeInfoForOption(opts options.Index, sci *sourceCodeInfo, n *ast.OptionNode, compact bool, uninterpIndex *int32, path []int32) { + if !compact { + sci.newLocWithoutComments(n, path) + } + subPath := opts[n] + if len(subPath) > 0 { + p := make([]int32, len(path), len(path)+len(subPath)) + copy(p, path) + if subPath[0] == -1 { + // used by "default" and "json_name" field pseudo-options + // to attribute path to parent element (since those are + // stored directly on the descriptor, not its options) + subPath = subPath[1:] + p = p[:len(path)-1] + } + p = append(p, subPath...) 
+ if compact { + sci.newLoc(n, p) + } else { + sci.newLocWithComments(n, p) + } + return + } + + // it's an uninterpreted option + optPath := path + optPath = append(optPath, internal.UninterpretedOptionsTag, *uninterpIndex) + *uninterpIndex++ + sci.newLoc(n, optPath) + var valTag int32 + switch n.Val.(type) { + case ast.IdentValueNode: + valTag = internal.UninterpretedIdentTag + case *ast.NegativeIntLiteralNode: + valTag = internal.UninterpretedNegIntTag + case ast.IntValueNode: + valTag = internal.UninterpretedPosIntTag + case ast.FloatValueNode: + valTag = internal.UninterpretedDoubleTag + case ast.StringValueNode: + valTag = internal.UninterpretedStringTag + case *ast.MessageLiteralNode: + valTag = internal.UninterpretedAggregateTag + } + if valTag != 0 { + sci.newLoc(n.Val, append(optPath, valTag)) + } + for j, nn := range n.Name.Parts { + optNmPath := optPath + optNmPath = append(optNmPath, internal.UninterpretedNameTag, int32(j)) + sci.newLoc(nn, optNmPath) + sci.newLoc(nn.Name, append(optNmPath, internal.UninterpretedNameNameTag)) + } +} + +func generateSourceCodeInfoForMessage(opts options.Index, sci *sourceCodeInfo, n ast.MessageDeclNode, fieldPath []int32, path []int32) { + var openBrace ast.Node + + var decls []ast.MessageElement + switch n := n.(type) { + case *ast.MessageNode: + openBrace = n.OpenBrace + decls = n.Decls + case *ast.GroupNode: + openBrace = n.OpenBrace + decls = n.Decls + case *ast.MapFieldNode: + sci.newLoc(n, path) + // map entry so nothing else to do + return + } + sci.newBlockLocWithComments(n, openBrace, path) + + sci.newLoc(n.MessageName(), append(path, internal.MessageNameTag)) + // matching protoc, which emits the corresponding field type name (for group fields) + // right after the source location for the group message name + if fieldPath != nil { + sci.newLoc(n.MessageName(), append(fieldPath, internal.FieldTypeNameTag)) + } + + var optIndex, fieldIndex, oneOfIndex, extendIndex, nestedMsgIndex int32 + var nestedEnumIndex, extRangeIndex, reservedRangeIndex, reservedNameIndex int32 + for _, child := range decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.MessageOptionsTag)) + case *ast.FieldNode: + generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex)) + fieldIndex++ + case *ast.GroupNode: + fldPath := path + fldPath = append(fldPath, internal.MessageFieldsTag, fieldIndex) + generateSourceCodeInfoForField(opts, sci, child, fldPath) + fieldIndex++ + generateSourceCodeInfoForMessage(opts, sci, child, fldPath, append(dup(path), internal.MessageNestedMessagesTag, nestedMsgIndex)) + nestedMsgIndex++ + case *ast.MapFieldNode: + generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex)) + fieldIndex++ + nestedMsgIndex++ + case *ast.OneOfNode: + generateSourceCodeInfoForOneOf(opts, sci, child, &fieldIndex, &nestedMsgIndex, append(path, internal.MessageFieldsTag), append(dup(path), internal.MessageNestedMessagesTag), append(dup(path), internal.MessageOneOfsTag, oneOfIndex)) + oneOfIndex++ + case *ast.MessageNode: + generateSourceCodeInfoForMessage(opts, sci, child, nil, append(path, internal.MessageNestedMessagesTag, nestedMsgIndex)) + nestedMsgIndex++ + case *ast.EnumNode: + generateSourceCodeInfoForEnum(opts, sci, child, append(path, internal.MessageEnumsTag, nestedEnumIndex)) + nestedEnumIndex++ + case *ast.ExtendNode: + generateSourceCodeInfoForExtensions(opts, 
sci, child, &extendIndex, &nestedMsgIndex, append(path, internal.MessageExtensionsTag), append(dup(path), internal.MessageNestedMessagesTag)) + case *ast.ExtensionRangeNode: + generateSourceCodeInfoForExtensionRanges(opts, sci, child, &extRangeIndex, append(path, internal.MessageExtensionRangesTag)) + case *ast.ReservedNode: + if len(child.Names) > 0 { + resPath := path + resPath = append(resPath, internal.MessageReservedNamesTag) + sci.newLocWithComments(child, resPath) + for _, rn := range child.Names { + sci.newLoc(rn, append(resPath, reservedNameIndex)) + reservedNameIndex++ + } + } + if len(child.Ranges) > 0 { + resPath := path + resPath = append(resPath, internal.MessageReservedRangesTag) + sci.newLocWithComments(child, resPath) + for _, rr := range child.Ranges { + generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex)) + reservedRangeIndex++ + } + } + } + } +} + +func generateSourceCodeInfoForEnum(opts options.Index, sci *sourceCodeInfo, n *ast.EnumNode, path []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, path) + sci.newLoc(n.Name, append(path, internal.EnumNameTag)) + + var optIndex, valIndex, reservedNameIndex, reservedRangeIndex int32 + for _, child := range n.Decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.EnumOptionsTag)) + case *ast.EnumValueNode: + generateSourceCodeInfoForEnumValue(opts, sci, child, append(path, internal.EnumValuesTag, valIndex)) + valIndex++ + case *ast.ReservedNode: + if len(child.Names) > 0 { + resPath := path + resPath = append(resPath, internal.EnumReservedNamesTag) + sci.newLocWithComments(child, resPath) + for _, rn := range child.Names { + sci.newLoc(rn, append(resPath, reservedNameIndex)) + reservedNameIndex++ + } + } + if len(child.Ranges) > 0 { + resPath := path + resPath = append(resPath, internal.EnumReservedRangesTag) + sci.newLocWithComments(child, resPath) + for _, rr := range child.Ranges { + generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex)) + reservedRangeIndex++ + } + } + } + } +} + +func generateSourceCodeInfoForEnumValue(opts options.Index, sci *sourceCodeInfo, n *ast.EnumValueNode, path []int32) { + sci.newLocWithComments(n, path) + sci.newLoc(n.Name, append(path, internal.EnumValNameTag)) + sci.newLoc(n.Number, append(path, internal.EnumValNumberTag)) + + // enum value options + if n.Options != nil { + optsPath := path + optsPath = append(optsPath, internal.EnumValOptionsTag) + sci.newLoc(n.Options, optsPath) + var optIndex int32 + for _, opt := range n.Options.GetElements() { + generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath) + } + } +} + +func generateSourceCodeInfoForReservedRange(sci *sourceCodeInfo, n *ast.RangeNode, path []int32) { + sci.newLoc(n, path) + sci.newLoc(n.StartVal, append(path, internal.ReservedRangeStartTag)) + switch { + case n.EndVal != nil: + sci.newLoc(n.EndVal, append(path, internal.ReservedRangeEndTag)) + case n.Max != nil: + sci.newLoc(n.Max, append(path, internal.ReservedRangeEndTag)) + default: + sci.newLoc(n.StartVal, append(path, internal.ReservedRangeEndTag)) + } +} + +func generateSourceCodeInfoForExtensions(opts options.Index, sci *sourceCodeInfo, n *ast.ExtendNode, extendIndex, msgIndex *int32, extendPath, msgPath []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, extendPath) + for _, decl := range n.Decls { + switch decl := decl.(type) { + case *ast.FieldNode: + 
generateSourceCodeInfoForField(opts, sci, decl, append(extendPath, *extendIndex)) + *extendIndex++ + case *ast.GroupNode: + fldPath := extendPath + fldPath = append(fldPath, *extendIndex) + generateSourceCodeInfoForField(opts, sci, decl, fldPath) + *extendIndex++ + generateSourceCodeInfoForMessage(opts, sci, decl, fldPath, append(msgPath, *msgIndex)) + *msgIndex++ + } + } +} + +func generateSourceCodeInfoForOneOf(opts options.Index, sci *sourceCodeInfo, n *ast.OneOfNode, fieldIndex, nestedMsgIndex *int32, fieldPath, nestedMsgPath, oneOfPath []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, oneOfPath) + sci.newLoc(n.Name, append(oneOfPath, internal.OneOfNameTag)) + + var optIndex int32 + for _, child := range n.Decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(oneOfPath, internal.OneOfOptionsTag)) + case *ast.FieldNode: + generateSourceCodeInfoForField(opts, sci, child, append(fieldPath, *fieldIndex)) + *fieldIndex++ + case *ast.GroupNode: + fldPath := fieldPath + fldPath = append(fldPath, *fieldIndex) + generateSourceCodeInfoForField(opts, sci, child, fldPath) + *fieldIndex++ + generateSourceCodeInfoForMessage(opts, sci, child, fldPath, append(nestedMsgPath, *nestedMsgIndex)) + *nestedMsgIndex++ + } + } +} + +func generateSourceCodeInfoForField(opts options.Index, sci *sourceCodeInfo, n ast.FieldDeclNode, path []int32) { + var fieldType string + if f, ok := n.(*ast.FieldNode); ok { + fieldType = string(f.FldType.AsIdentifier()) + } + + if n.GetGroupKeyword() != nil { + // comments will appear on group message + sci.newLocWithoutComments(n, path) + if n.FieldExtendee() != nil { + sci.newLoc(n.FieldExtendee(), append(path, internal.FieldExtendeeTag)) + } + if n.FieldLabel() != nil { + // no comments here either (label is first token for group, so we want + // to leave the comments to be associated with the group message instead) + sci.newLocWithoutComments(n.FieldLabel(), append(path, internal.FieldLabelTag)) + } + sci.newLoc(n.FieldType(), append(path, internal.FieldTypeTag)) + // let the name comments be attributed to the group name + sci.newLocWithoutComments(n.FieldName(), append(path, internal.FieldNameTag)) + } else { + sci.newLocWithComments(n, path) + if n.FieldExtendee() != nil { + sci.newLoc(n.FieldExtendee(), append(path, internal.FieldExtendeeTag)) + } + if n.FieldLabel() != nil { + sci.newLoc(n.FieldLabel(), append(path, internal.FieldLabelTag)) + } + var tag int32 + if _, isScalar := internal.FieldTypes[fieldType]; isScalar { + tag = internal.FieldTypeTag + } else { + // this is a message or an enum, so attribute type location + // to the type name field + tag = internal.FieldTypeNameTag + } + sci.newLoc(n.FieldType(), append(path, tag)) + sci.newLoc(n.FieldName(), append(path, internal.FieldNameTag)) + } + sci.newLoc(n.FieldTag(), append(path, internal.FieldNumberTag)) + + if n.GetOptions() != nil { + optsPath := path + optsPath = append(optsPath, internal.FieldOptionsTag) + sci.newLoc(n.GetOptions(), optsPath) + var optIndex int32 + for _, opt := range n.GetOptions().GetElements() { + generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath) + } + } +} + +func generateSourceCodeInfoForExtensionRanges(opts options.Index, sci *sourceCodeInfo, n *ast.ExtensionRangeNode, extRangeIndex *int32, path []int32) { + sci.newLocWithComments(n, path) + startExtRangeIndex := *extRangeIndex + for _, child := range n.Ranges { + path := append(path, *extRangeIndex) + 
*extRangeIndex++ + sci.newLoc(child, path) + sci.newLoc(child.StartVal, append(path, internal.ExtensionRangeStartTag)) + switch { + case child.EndVal != nil: + sci.newLoc(child.EndVal, append(path, internal.ExtensionRangeEndTag)) + case child.Max != nil: + sci.newLoc(child.Max, append(path, internal.ExtensionRangeEndTag)) + default: + sci.newLoc(child.StartVal, append(path, internal.ExtensionRangeEndTag)) + } + } + // options for all ranges go after the start+end values + for range n.Ranges { + path := append(path, startExtRangeIndex) + startExtRangeIndex++ + if n.Options != nil { + optsPath := path + optsPath = append(optsPath, internal.ExtensionRangeOptionsTag) + sci.newLoc(n.Options, optsPath) + var optIndex int32 + for _, opt := range n.Options.GetElements() { + generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath) + } + } + } +} + +func generateSourceCodeInfoForService(opts options.Index, sci *sourceCodeInfo, n *ast.ServiceNode, path []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, path) + sci.newLoc(n.Name, append(path, internal.ServiceNameTag)) + var optIndex, rpcIndex int32 + for _, child := range n.Decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.ServiceOptionsTag)) + case *ast.RPCNode: + generateSourceCodeInfoForMethod(opts, sci, child, append(path, internal.ServiceMethodsTag, rpcIndex)) + rpcIndex++ + } + } +} + +func generateSourceCodeInfoForMethod(opts options.Index, sci *sourceCodeInfo, n *ast.RPCNode, path []int32) { + if n.OpenBrace != nil { + sci.newBlockLocWithComments(n, n.OpenBrace, path) + } else { + sci.newLocWithComments(n, path) + } + sci.newLoc(n.Name, append(path, internal.MethodNameTag)) + if n.Input.Stream != nil { + sci.newLoc(n.Input.Stream, append(path, internal.MethodInputStreamTag)) + } + sci.newLoc(n.Input.MessageType, append(path, internal.MethodInputTag)) + if n.Output.Stream != nil { + sci.newLoc(n.Output.Stream, append(path, internal.MethodOutputStreamTag)) + } + sci.newLoc(n.Output.MessageType, append(path, internal.MethodOutputTag)) + + optsPath := path + optsPath = append(optsPath, internal.MethodOptionsTag) + var optIndex int32 + for _, decl := range n.Decls { + if opt, ok := decl.(*ast.OptionNode); ok { + generateSourceCodeInfoForOption(opts, sci, opt, false, &optIndex, optsPath) + } + } +} + +type sourceCodeInfo struct { + file *ast.FileNode + extraComments bool + locs []*descriptorpb.SourceCodeInfo_Location + commentsUsed map[ast.SourcePos]struct{} +} + +func (sci *sourceCodeInfo) newLocWithoutComments(n ast.Node, path []int32) { + dup := make([]int32, len(path)) + copy(dup, path) + var start, end ast.SourcePos + if n == sci.file { + // For files, we don't want to consider trailing EOF token + // as part of the span. We want the span to only include + // actual lexical elements in the file (which also excludes + // whitespace and comments). 
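+		// To that end, the code below uses the end of the last non-EOF child
+		// token as the end of the file's span.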
+ children := sci.file.Children() + if len(children) > 0 && isEOF(children[len(children)-1]) { + children = children[:len(children)-1] + } + if len(children) == 0 { + start = ast.SourcePos{Filename: sci.file.Name(), Line: 1, Col: 1} + end = start + } else { + start = sci.file.TokenInfo(n.Start()).Start() + end = sci.file.TokenInfo(children[len(children)-1].End()).End() + } + } else { + info := sci.file.NodeInfo(n) + start, end = info.Start(), info.End() + } + sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ + Path: dup, + Span: makeSpan(start, end), + }) +} + +func (sci *sourceCodeInfo) newLoc(n ast.Node, path []int32) { + info := sci.file.NodeInfo(n) + if !sci.extraComments { + dup := make([]int32, len(path)) + copy(dup, path) + start, end := info.Start(), info.End() + sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ + Path: dup, + Span: makeSpan(start, end), + }) + } else { + detachedComments, leadingComments := sci.getLeadingComments(n) + trailingComments := sci.getTrailingComments(n) + sci.newLocWithGivenComments(info, detachedComments, leadingComments, trailingComments, path) + } +} + +func isEOF(n ast.Node) bool { + r, ok := n.(*ast.RuneNode) + return ok && r.Rune == 0 +} + +func (sci *sourceCodeInfo) newBlockLocWithComments(n, openBrace ast.Node, path []int32) { + // Block definitions use trailing comments after the open brace "{" as the + // element's trailing comments. For example: + // + // message Foo { // this is a trailing comment for a message + // + // } // not this + // + nodeInfo := sci.file.NodeInfo(n) + detachedComments, leadingComments := sci.getLeadingComments(n) + trailingComments := sci.getTrailingComments(openBrace) + sci.newLocWithGivenComments(nodeInfo, detachedComments, leadingComments, trailingComments, path) +} + +func (sci *sourceCodeInfo) newLocWithComments(n ast.Node, path []int32) { + nodeInfo := sci.file.NodeInfo(n) + detachedComments, leadingComments := sci.getLeadingComments(n) + trailingComments := sci.getTrailingComments(n) + sci.newLocWithGivenComments(nodeInfo, detachedComments, leadingComments, trailingComments, path) +} + +func (sci *sourceCodeInfo) newLocWithGivenComments(nodeInfo ast.NodeInfo, detachedComments []comments, leadingComments comments, trailingComments comments, path []int32) { + if (len(detachedComments) > 0 && sci.commentUsed(detachedComments[0])) || + (len(detachedComments) == 0 && sci.commentUsed(leadingComments)) { + detachedComments = nil + leadingComments = ast.EmptyComments + } + if sci.commentUsed(trailingComments) { + trailingComments = ast.EmptyComments + } + + var trail *string + if trailingComments.Len() > 0 { + trail = proto.String(sci.combineComments(trailingComments)) + } + + var lead *string + if leadingComments.Len() > 0 { + lead = proto.String(sci.combineComments(leadingComments)) + } + + detached := make([]string, len(detachedComments)) + for i, cmts := range detachedComments { + detached[i] = sci.combineComments(cmts) + } + + dup := make([]int32, len(path)) + copy(dup, path) + sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ + LeadingDetachedComments: detached, + LeadingComments: lead, + TrailingComments: trail, + Path: dup, + Span: makeSpan(nodeInfo.Start(), nodeInfo.End()), + }) +} + +type comments interface { + Len() int + Index(int) ast.Comment +} + +type subComments struct { + offs, n int + c ast.Comments +} + +func (s subComments) Len() int { + return s.n +} + +func (s subComments) Index(i int) ast.Comment { + if i < 0 || i >= s.n { + 
panic(fmt.Errorf("runtime error: index out of range [%d] with length %d", i, s.n)) + } + return s.c.Index(i + s.offs) +} + +func (sci *sourceCodeInfo) getLeadingComments(n ast.Node) ([]comments, comments) { + s := n.Start() + info := sci.file.TokenInfo(s) + var prevInfo ast.NodeInfo + if prev, ok := sci.file.Tokens().Previous(s); ok { + prevInfo = sci.file.TokenInfo(prev) + } + _, d, l := sci.attributeComments(prevInfo, info) + return d, l +} + +func (sci *sourceCodeInfo) getTrailingComments(n ast.Node) comments { + e := n.End() + next, ok := sci.file.Tokens().Next(e) + if !ok { + return ast.EmptyComments + } + info := sci.file.TokenInfo(e) + nextInfo := sci.file.TokenInfo(next) + t, _, _ := sci.attributeComments(info, nextInfo) + return t +} + +func (sci *sourceCodeInfo) attributeComments(prevInfo, info ast.NodeInfo) (t comments, d []comments, l comments) { + detached := groupComments(info.LeadingComments()) + var trail comments + if prevInfo.IsValid() { + trail = comments(prevInfo.TrailingComments()) + if trail.Len() == 0 { + trail, detached = sci.maybeDonate(prevInfo, info, detached) + } + } else { + trail = ast.EmptyComments + } + detached, lead := sci.maybeAttach(prevInfo, info, trail.Len() > 0, detached) + return trail, detached, lead +} + +func (sci *sourceCodeInfo) maybeDonate(prevInfo ast.NodeInfo, info ast.NodeInfo, lead []comments) (t comments, l []comments) { + if len(lead) == 0 { + // nothing to donate + return ast.EmptyComments, nil + } + firstCommentPos := lead[0].Index(0) + if firstCommentPos.Start().Line > prevInfo.End().Line+1 { + // first comment is detached from previous token, so can't be a trailing comment + return ast.EmptyComments, lead + } + if len(lead) > 1 { + // multiple groups? then donate first comment to previous token + return lead[0], lead[1:] + } + // there is only one element in lead + comment := lead[0] + lastCommentPos := comment.Index(comment.Len() - 1) + if lastCommentPos.End().Line < info.Start().Line-1 { + // there is a blank line between the comments and subsequent token, so + // we can donate the comment to previous token + return comment, nil + } + if txt := info.RawText(); txt == "" || (len(txt) == 1 && strings.ContainsAny(txt, "}]),;")) { + // token is a symbol for the end of a scope or EOF, which doesn't need a leading comment + if !sci.extraComments && txt != "" && + firstCommentPos.Start().Line == prevInfo.End().Line && + lastCommentPos.End().Line == info.Start().Line { + // protoc does not donate if prev and next token are on the same line since it's + // ambiguous which one should get the comment; so we mirror that here + return ast.EmptyComments, lead + } + // But with extra comments, we always donate in this situation in order to capture + // more comments. Because otherwise, these comments are lost since these symbols + // don't map to a location in source code info. + return comment, nil + } + // cannot donate + return ast.EmptyComments, lead +} + +func (sci *sourceCodeInfo) maybeAttach(prevInfo ast.NodeInfo, info ast.NodeInfo, hasTrail bool, lead []comments) (d []comments, l comments) { + if len(lead) == 0 { + return nil, ast.EmptyComments + } + + if len(lead) == 1 && !hasTrail && prevInfo.IsValid() { + // If the one comment appears attached to both previous and next tokens, + // don't attach to either. 
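+		// A hypothetical illustration, where one comment group touches both:
+		//	foo = 1; /* starts on foo's line,
+		//	            ends on bar's line */ bar = 2;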
+ comment := lead[0] + attachedToPrevious := comment.Index(0).Start().Line == prevInfo.End().Line + attachedToNext := comment.Index(comment.Len()-1).End().Line == info.Start().Line + if attachedToPrevious && attachedToNext { + // Since attachment is ambiguous, leave it detached. + return lead, ast.EmptyComments + } + } + + lastComment := lead[len(lead)-1] + if lastComment.Index(lastComment.Len()-1).End().Line >= info.Start().Line-1 { + return lead[:len(lead)-1], lastComment + } + + return lead, ast.EmptyComments +} + +func makeSpan(start, end ast.SourcePos) []int32 { + if start.Line == end.Line { + return []int32{int32(start.Line) - 1, int32(start.Col) - 1, int32(end.Col) - 1} + } + return []int32{int32(start.Line) - 1, int32(start.Col) - 1, int32(end.Line) - 1, int32(end.Col) - 1} +} + +func (sci *sourceCodeInfo) commentUsed(c comments) bool { + if c.Len() == 0 { + return false + } + pos := c.Index(0).Start() + if _, ok := sci.commentsUsed[pos]; ok { + return true + } + + sci.commentsUsed[pos] = struct{}{} + return false +} + +func groupComments(cmts ast.Comments) []comments { + if cmts.Len() == 0 { + return nil + } + var groups []comments + singleLineStyle := cmts.Index(0).RawText()[:2] == "//" + line := cmts.Index(0).End().Line + start := 0 + for i := 1; i < cmts.Len(); i++ { + c := cmts.Index(i) + prevSingleLine := singleLineStyle + singleLineStyle = strings.HasPrefix(c.RawText(), "//") + if !singleLineStyle || prevSingleLine != singleLineStyle || c.Start().Line > line+1 { + // new group! + groups = append(groups, subComments{offs: start, n: i - start, c: cmts}) + start = i + } + line = c.End().Line + } + // don't forget last group + groups = append(groups, subComments{offs: start, n: cmts.Len() - start, c: cmts}) + return groups +} + +func (sci *sourceCodeInfo) combineComments(comments comments) string { + if comments.Len() == 0 { + return "" + } + var buf bytes.Buffer + for i, l := 0, comments.Len(); i < l; i++ { + c := comments.Index(i) + txt := c.RawText() + if txt[:2] == "//" { + buf.WriteString(txt[2:]) + // protoc includes trailing newline for line comments, + // but it's not present in the AST comment. So we need + // to add it if present. + if i, ok := sci.file.Items().Next(c.AsItem()); ok { + info := sci.file.ItemInfo(i) + if strings.HasPrefix(info.LeadingWhitespace(), "\n") { + buf.WriteRune('\n') + } + } + } else { + lines := strings.Split(txt[2:len(txt)-2], "\n") + first := true + for _, l := range lines { + if first { + first = false + buf.WriteString(l) + continue + } + buf.WriteByte('\n') + + // strip a prefix of whitespace followed by '*' + j := 0 + for j < len(l) { + if l[j] != ' ' && l[j] != '\t' { + break + } + j++ + } + switch { + case j == len(l): + l = "" + case l[j] == '*': + l = l[j+1:] + case j > 0: + l = l[j:] + } + + buf.WriteString(l) + } + } + } + return buf.String() +} + +func dup(p []int32) []int32 { + return append(([]int32)(nil), p...) +} diff --git a/vendor/github.com/bufbuild/protocompile/std_imports.go b/vendor/github.com/bufbuild/protocompile/std_imports.go new file mode 100644 index 00000000..dcf8553a --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/std_imports.go @@ -0,0 +1,62 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protocompile + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + _ "google.golang.org/protobuf/types/known/anypb" // link in packages that include the standard protos included with protoc. + _ "google.golang.org/protobuf/types/known/apipb" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/emptypb" + _ "google.golang.org/protobuf/types/known/fieldmaskpb" + _ "google.golang.org/protobuf/types/known/sourcecontextpb" + _ "google.golang.org/protobuf/types/known/structpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + _ "google.golang.org/protobuf/types/known/typepb" + _ "google.golang.org/protobuf/types/known/wrapperspb" + _ "google.golang.org/protobuf/types/pluginpb" +) + +// All files that are included with protoc are also included with this package +// so that clients do not need to explicitly supply a copy of these protos (just +// like callers of protoc do not need to supply them). +var standardImports map[string]protoreflect.FileDescriptor + +func init() { + standardFilenames := []string{ + "google/protobuf/any.proto", + "google/protobuf/api.proto", + "google/protobuf/compiler/plugin.proto", + "google/protobuf/descriptor.proto", + "google/protobuf/duration.proto", + "google/protobuf/empty.proto", + "google/protobuf/field_mask.proto", + "google/protobuf/source_context.proto", + "google/protobuf/struct.proto", + "google/protobuf/timestamp.proto", + "google/protobuf/type.proto", + "google/protobuf/wrappers.proto", + } + + standardImports = map[string]protoreflect.FileDescriptor{} + for _, fn := range standardFilenames { + fd, err := protoregistry.GlobalFiles.FindFileByPath(fn) + if err != nil { + panic(err.Error()) + } + standardImports[fn] = fd + } +} diff --git a/vendor/github.com/bufbuild/protocompile/walk/walk.go b/vendor/github.com/bufbuild/protocompile/walk/walk.go new file mode 100644 index 00000000..e7a1ab3b --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/walk/walk.go @@ -0,0 +1,445 @@ +// Copyright 2020-2022 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package walk provides helper functions for traversing all elements in a +// protobuf file descriptor. There are versions both for traversing "rich" +// descriptors (protoreflect.Descriptor) and for traversing the underlying +// "raw" descriptor protos. +// +// # Enter And Exit +// +// This package includes variants of the functions that accept two callback +// functions. These variants have names ending with "EnterAndExit". 
One function
+// is called as each element is visited ("enter") and the other is called after
+// the element and all of its descendants have been visited ("exit"). This
+// can be useful when you need to track state that is scoped to the visitation
+// of a single element.
+//
+// # Source Path
+//
+// When traversing raw descriptor protos, this package includes variants whose
+// callback accepts a protoreflect.SourcePath. These variants have names that
+// include "WithPath". This path can be used to locate corresponding data in the
+// file's source code info (if present).
+package walk
+
+import (
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/bufbuild/protocompile/internal"
+)
+
+// Descriptors walks all descriptors in the given file using a depth-first
+// traversal, calling the given function for each descriptor in the hierarchy.
+// The walk ends when traversal is complete or when the function returns an
+// error. If the function returns an error, that is returned as the result of the
+// walk operation.
+//
+// Descriptors are visited using a pre-order traversal, where the function is
+// called for a descriptor before it is called for any of its descendants.
+func Descriptors(file protoreflect.FileDescriptor, fn func(protoreflect.Descriptor) error) error {
+	return DescriptorsEnterAndExit(file, fn, nil)
+}
+
+// DescriptorsEnterAndExit walks all descriptors in the given file using a
+// depth-first traversal, calling the given functions on entry and on exit
+// for each descriptor in the hierarchy. The walk ends when traversal is
+// complete or when a function returns an error. If a function returns an error,
+// that is returned as the result of the walk operation.
+//
+// The enter function is called using a pre-order traversal, where the function
+// is called for a descriptor before it is called for any of its descendants.
+// The exit function is called using a post-order traversal, where the function
+// is called for a descriptor only after it has been called for all of its
+// descendants.
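+//
+// A minimal usage sketch (the callbacks are hypothetical and just track
+// nesting depth while printing names):
+//
+//	depth := 0
+//	err := walk.DescriptorsEnterAndExit(file,
+//		func(d protoreflect.Descriptor) error {
+//			fmt.Printf("%*s%s\n", depth*2, "", d.Name())
+//			depth++
+//			return nil
+//		},
+//		func(d protoreflect.Descriptor) error {
+//			depth--
+//			return nil
+//		},
+//	)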
+func DescriptorsEnterAndExit(file protoreflect.FileDescriptor, enter, exit func(protoreflect.Descriptor) error) error { + for i := 0; i < file.Messages().Len(); i++ { + msg := file.Messages().Get(i) + if err := messageDescriptor(msg, enter, exit); err != nil { + return err + } + } + for i := 0; i < file.Enums().Len(); i++ { + en := file.Enums().Get(i) + if err := enumDescriptor(en, enter, exit); err != nil { + return err + } + } + for i := 0; i < file.Extensions().Len(); i++ { + ext := file.Extensions().Get(i) + if err := enter(ext); err != nil { + return err + } + if exit != nil { + if err := exit(ext); err != nil { + return err + } + } + } + for i := 0; i < file.Services().Len(); i++ { + svc := file.Services().Get(i) + if err := enter(svc); err != nil { + return err + } + for i := 0; i < svc.Methods().Len(); i++ { + mtd := svc.Methods().Get(i) + if err := enter(mtd); err != nil { + return err + } + if exit != nil { + if err := exit(mtd); err != nil { + return err + } + } + } + if exit != nil { + if err := exit(svc); err != nil { + return err + } + } + } + return nil +} + +func messageDescriptor(msg protoreflect.MessageDescriptor, enter, exit func(protoreflect.Descriptor) error) error { + if err := enter(msg); err != nil { + return err + } + for i := 0; i < msg.Fields().Len(); i++ { + fld := msg.Fields().Get(i) + if err := enter(fld); err != nil { + return err + } + if exit != nil { + if err := exit(fld); err != nil { + return err + } + } + } + for i := 0; i < msg.Oneofs().Len(); i++ { + oo := msg.Oneofs().Get(i) + if err := enter(oo); err != nil { + return err + } + if exit != nil { + if err := exit(oo); err != nil { + return err + } + } + } + for i := 0; i < msg.Messages().Len(); i++ { + nested := msg.Messages().Get(i) + if err := messageDescriptor(nested, enter, exit); err != nil { + return err + } + } + for i := 0; i < msg.Enums().Len(); i++ { + en := msg.Enums().Get(i) + if err := enumDescriptor(en, enter, exit); err != nil { + return err + } + } + for i := 0; i < msg.Extensions().Len(); i++ { + ext := msg.Extensions().Get(i) + if err := enter(ext); err != nil { + return err + } + if exit != nil { + if err := exit(ext); err != nil { + return err + } + } + } + if exit != nil { + if err := exit(msg); err != nil { + return err + } + } + return nil +} + +func enumDescriptor(en protoreflect.EnumDescriptor, enter, exit func(protoreflect.Descriptor) error) error { + if err := enter(en); err != nil { + return err + } + for i := 0; i < en.Values().Len(); i++ { + enVal := en.Values().Get(i) + if err := enter(enVal); err != nil { + return err + } + if exit != nil { + if err := exit(enVal); err != nil { + return err + } + } + } + if exit != nil { + if err := exit(en); err != nil { + return err + } + } + return nil +} + +// DescriptorProtosWithPath walks all descriptor protos in the given file using +// a depth-first traversal. This is the same as DescriptorProtos except that the +// callback function, fn, receives a protoreflect.SourcePath, that indicates the +// path for the element in the file's source code info. +func DescriptorProtosWithPath(file *descriptorpb.FileDescriptorProto, fn func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error) error { + return DescriptorProtosWithPathEnterAndExit(file, fn, nil) +} + +// DescriptorProtosWithPathEnterAndExit walks all descriptor protos in the given +// file using a depth-first traversal. 
This is the same as +// DescriptorProtosEnterAndExit except that the callback function, fn, receives +// a protoreflect.SourcePath, that indicates the path for the element in the +// file's source code info. +func DescriptorProtosWithPathEnterAndExit(file *descriptorpb.FileDescriptorProto, enter, exit func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error) error { + w := &protoWalker{usePath: true, enter: enter, exit: exit} + return w.walkDescriptorProtos(file) +} + +// DescriptorProtos walks all descriptor protos in the given file using a +// depth-first traversal, calling the given function for each descriptor proto +// in the hierarchy. The walk ends when traversal is complete or when the +// function returns an error. If the function returns an error, that is +// returned as the result of the walk operation. +// +// Descriptor protos are visited using a pre-order traversal, where the function +// is called for a descriptor before it is called for any of its descendants. +func DescriptorProtos(file *descriptorpb.FileDescriptorProto, fn func(protoreflect.FullName, proto.Message) error) error { + return DescriptorProtosEnterAndExit(file, fn, nil) +} + +// DescriptorProtosEnterAndExit walks all descriptor protos in the given file +// using a depth-first traversal, calling the given functions on entry and on +// exit for each descriptor in the hierarchy. The walk ends when traversal is +// complete or when a function returns an error. If a function returns an error, +// that is returned as the result of the walk operation. +// +// The enter function is called using a pre-order traversal, where the function +// is called for a descriptor proto before it is called for any of its +// descendants. The exit function is called using a post-order traversal, where +// the function is called for a descriptor proto only after it is called for any +// descendants. +func DescriptorProtosEnterAndExit(file *descriptorpb.FileDescriptorProto, enter, exit func(protoreflect.FullName, proto.Message) error) error { + enterWithPath := func(n protoreflect.FullName, p protoreflect.SourcePath, m proto.Message) error { + return enter(n, m) + } + var exitWithPath func(n protoreflect.FullName, p protoreflect.SourcePath, m proto.Message) error + if exit != nil { + exitWithPath = func(n protoreflect.FullName, p protoreflect.SourcePath, m proto.Message) error { + return exit(n, m) + } + } + w := &protoWalker{ + enter: enterWithPath, + exit: exitWithPath, + } + return w.walkDescriptorProtos(file) +} + +type protoWalker struct { + usePath bool + enter, exit func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error +} + +func (w *protoWalker) walkDescriptorProtos(file *descriptorpb.FileDescriptorProto) error { + prefix := file.GetPackage() + if prefix != "" { + prefix += "." 
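+		// Fully-qualified names built below all start with this package prefix.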
+ } + var path protoreflect.SourcePath + for i, msg := range file.MessageType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileMessagesTag, int32(i)) + } + if err := w.walkDescriptorProto(prefix, p, msg); err != nil { + return err + } + } + for i, en := range file.EnumType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileEnumsTag, int32(i)) + } + if err := w.walkEnumDescriptorProto(prefix, p, en); err != nil { + return err + } + } + for i, ext := range file.Extension { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileExtensionsTag, int32(i)) + } + fqn := prefix + ext.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + } + } + for i, svc := range file.Service { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileServicesTag, int32(i)) + } + fqn := prefix + svc.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, svc); err != nil { + return err + } + for j, mtd := range svc.Method { + var mp protoreflect.SourcePath + if w.usePath { + mp = p + mp = append(mp, internal.ServiceMethodsTag, int32(j)) + } + mtdFqn := fqn + "." + mtd.GetName() + if err := w.enter(protoreflect.FullName(mtdFqn), mp, mtd); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(mtdFqn), mp, mtd); err != nil { + return err + } + } + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, svc); err != nil { + return err + } + } + } + return nil +} + +func (w *protoWalker) walkDescriptorProto(prefix string, path protoreflect.SourcePath, msg *descriptorpb.DescriptorProto) error { + fqn := prefix + msg.GetName() + if err := w.enter(protoreflect.FullName(fqn), path, msg); err != nil { + return err + } + prefix = fqn + "." 
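+	// Children of this message (fields, oneofs, nested types, enums, and
+	// extensions) are all qualified by the message's fully-qualified name.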
+ for i, fld := range msg.Field { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageFieldsTag, int32(i)) + } + fqn := prefix + fld.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, fld); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, fld); err != nil { + return err + } + } + } + for i, oo := range msg.OneofDecl { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageOneOfsTag, int32(i)) + } + fqn := prefix + oo.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, oo); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, oo); err != nil { + return err + } + } + } + for i, nested := range msg.NestedType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageNestedMessagesTag, int32(i)) + } + if err := w.walkDescriptorProto(prefix, p, nested); err != nil { + return err + } + } + for i, en := range msg.EnumType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageEnumsTag, int32(i)) + } + if err := w.walkEnumDescriptorProto(prefix, p, en); err != nil { + return err + } + } + for i, ext := range msg.Extension { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageExtensionsTag, int32(i)) + } + fqn := prefix + ext.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + } + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), path, msg); err != nil { + return err + } + } + return nil +} + +func (w *protoWalker) walkEnumDescriptorProto(prefix string, path protoreflect.SourcePath, en *descriptorpb.EnumDescriptorProto) error { + fqn := prefix + en.GetName() + if err := w.enter(protoreflect.FullName(fqn), path, en); err != nil { + return err + } + for i, val := range en.Value { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.EnumValuesTag, int32(i)) + } + fqn := prefix + val.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, val); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, val); err != nil { + return err + } + } + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), path, en); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 00000000..6c16c255 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,530 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. 
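+//
+// A minimal streaming sketch (mypb.Item is a hypothetical message type);
+// the loop ends when the decoder reports io.EOF:
+//
+//	d := json.NewDecoder(r)
+//	for {
+//		item := new(mypb.Item)
+//		err := jsonpb.UnmarshalNext(d, item)
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//	}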
+func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. 
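+		// The legacy knobs map directly onto protojson's options:
+		// AllowUnknownFields becomes DiscardUnknown, and a custom
+		// AnyResolver is adapted to protojson's resolver interface below.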
+ opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + 
case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
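+		// JSON keys for extensions use the bracketed form
+		// "[fully.qualified.extension_name]", per the proto JSON format.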
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 00000000..685c80a6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. 
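+	// For example, Indent: "  " indents each nesting level by two more spaces.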
+ Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
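+	// These follow the dedicated proto3 JSON mappings for Any, the wrapper
+	// types, Duration, Timestamp, Value, Struct, and ListValue.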
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+		type ext struct {
+			desc protoreflect.FieldDescriptor
+			val  protoreflect.Value
+		}
+		var exts []ext
+		m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+			if fd.IsExtension() {
+				exts = append(exts, ext{fd, v})
+			}
+			return true
+		})
+		sort.Slice(exts, func(i, j int) bool {
+			return exts[i].desc.Number() < exts[j].desc.Number()
+		})
+
+		for _, ext := range exts {
+			if !firstField {
+				w.writeComma()
+			}
+			if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
+				return err
+			}
+			firstField = false
+		}
+	}
+
+	if w.Indent != "" {
+		w.write("\n")
+		w.write(indent)
+	}
+	w.write("}")
+	return nil
+}
+
+func (w *jsonWriter) writeComma() {
+	if w.Indent != "" {
+		w.write(",\n")
+	} else {
+		w.write(",")
+	}
+}
+
+func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
+	// "If the Any contains a value that has a special JSON mapping,
+	// it will be converted as follows: {"@type": xxx, "value": yyy}.
+	// Otherwise, the value will be converted into a JSON object,
+	// and the "@type" field will be inserted to indicate the actual data type."
+	md := m.Descriptor()
+	typeURL := m.Get(md.Fields().ByNumber(1)).String()
+	rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
+
+	var m2 protoreflect.Message
+	if w.AnyResolver != nil {
+		mi, err := w.AnyResolver.Resolve(typeURL)
+		if err != nil {
+			return err
+		}
+		m2 = proto.MessageReflect(mi)
+	} else {
+		mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+		if err != nil {
+			return err
+		}
+		m2 = mt.New()
+	}
+
+	if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
+		return err
+	}
+
+	if wellKnownType(m2.Descriptor().FullName()) == "" {
+		return w.marshalMessage(m2, indent, typeURL)
+	}
+
+	w.write("{")
+	if w.Indent != "" {
+		w.write("\n")
+	}
+	if err := w.marshalTypeURL(indent, typeURL); err != nil {
+		return err
+	}
+	w.writeComma()
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+		w.write(`"value": `)
+	} else {
+		w.write(`"value":`)
+	}
+	if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
+		return err
+	}
+	if w.Indent != "" {
+		w.write("\n")
+		w.write(indent)
+	}
+	w.write("}")
+	return nil
+}
+
+func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+	}
+	w.write(`"@type":`)
+	if w.Indent != "" {
+		w.write(" ")
+	}
+	b, err := json.Marshal(typeURL)
+	if err != nil {
+		return err
+	}
+	w.write(string(b))
+	return nil
+}
+
+// marshalField writes field description and value to the Writer.
+func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+	}
+	w.write(`"`)
+	switch {
+	case fd.IsExtension():
+		// For message set, use the name of the message as the extension name.
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 00000000..480e2448 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000..85f9f573 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
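A minimal usage sketch for the Marshaler defined above (illustrative values and the package alias durpb are my own, not part of the vendored files): well-known types are routed through their special JSON mappings in marshalMessage, so a google.protobuf.Duration renders as a single quoted "Ns" string rather than an object with seconds/nanos fields.

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/jsonpb"
		durpb "github.com/golang/protobuf/ptypes/duration"
	)

	func main() {
		m := &jsonpb.Marshaler{OrigName: true, EmitDefaults: true}
		// Duration takes the well-known-type branch, so the output is
		// one JSON string with trailing zero groups trimmed.
		s, err := m.MarshalToString(&durpb.Duration{Seconds: 90, Nanos: 500000000})
		if err != nil {
			panic(err)
		}
		fmt.Println(s) // "90.500s"
	}
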
+ +package ptypes + +import ( + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" +) + +const urlPrefix = "type.googleapis.com/" + +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return name, nil +} + +// MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) + if err != nil { + return nil, err + } + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil +} + +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) + if err != nil { + return nil, err + } + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err + } + return proto.MessageV1(mt.New().Interface()), nil +} + +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. +// +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { + var err error + dm.Message, err = Empty(any) + if err != nil { + return err + } + } + m = dm.Message + } + + anyName, err := AnyMessageName(any) + if err != nil { + return err + } + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) + } + return proto.Unmarshal(any.Value, m) +} + +// Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. 
+func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { + return false + } + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000..0ef27d33 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/any/any.proto + +package any + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/any.proto. 
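A sketch of how the deprecated Any helpers above compose (illustrative values, not vendored code): MarshalAny prefixes the registered message name with type.googleapis.com/, Is matches on that URL suffix, and passing a *DynamicAny to UnmarshalAny lets it allocate the concrete message type from the global registry.

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/ptypes"
		durpb "github.com/golang/protobuf/ptypes/duration"
	)

	func main() {
		a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 5})
		if err != nil {
			panic(err)
		}
		fmt.Println(a.TypeUrl)                       // type.googleapis.com/google.protobuf.Duration
		fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true

		var x ptypes.DynamicAny // lets UnmarshalAny pick the concrete type
		if err := ptypes.UnmarshalAny(a, &x); err != nil {
			panic(err)
		}
		fmt.Println(x.Message) // seconds:5
	}
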
+ +type Any = anypb.Any + +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000..d3c33259 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000..b2b55dd8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
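The forwarding file above defines no message of its own; type Any = anypb.Any is a true type alias, so the legacy and current import paths are interchangeable. A short sketch (the URL value is illustrative only):

	package main

	import (
		"fmt"

		oldany "github.com/golang/protobuf/ptypes/any"
		"google.golang.org/protobuf/types/known/anypb"
	)

	func main() {
		// One value, two import paths: the alias makes these the same Go type.
		var a *oldany.Any = &anypb.Any{TypeUrl: "type.googleapis.com/google.protobuf.Empty"}
		fmt.Println(a.TypeUrl)
	}
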
+ +package ptypes + +import ( + "errors" + "fmt" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" +) + +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. +const ( + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { + return 0, err + } + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. +func DurationProto(d time.Duration) *durationpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durationpb.Duration{ + Seconds: int64(secs), + Nanos: int32(nanos), + } +} + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000..d0079ee3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +package duration + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/duration.proto. 
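A round trip through the conversion helpers just defined (illustrative values, not vendored code): DurationProto splits a time.Duration into seconds and nanos with matching signs, and Duration validates the range and sign rules on the way back.

	package main

	import (
		"fmt"
		"time"

		"github.com/golang/protobuf/ptypes"
	)

	func main() {
		pb := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
		fmt.Println(pb.Seconds, pb.Nanos) // 90 500000000

		d, err := ptypes.Duration(pb) // checks the ±10,000-year range and sign agreement
		if err != nil {
			panic(err)
		}
		fmt.Println(d) // 1m30.5s
	}
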
+ +type Duration = durationpb.Duration + +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000..8368a3f7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" +) + +// Range of google.protobuf.Duration as specified in timestamp.proto. +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. 
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
+func TimestampNow() *timestamppb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
+func TimestampString(ts *timestamppb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000..a76f8076 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. + +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE new file mode 100644 index 00000000..b53b91d8 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 Joshua Humphries + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jhump/protoreflect/codec/codec.go b/vendor/github.com/jhump/protoreflect/codec/codec.go new file mode 100644 index 00000000..7e5c5684 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/codec/codec.go @@ -0,0 +1,218 @@ +package codec + +import ( + "io" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/internal/codec" +) + +// ErrOverflow is returned when an integer is too large to be represented. +var ErrOverflow = codec.ErrOverflow + +// ErrBadWireType is returned when decoding a wire-type from a buffer that +// is not valid. +var ErrBadWireType = codec.ErrBadWireType + +// NB: much of the implementation is in an internal package, to avoid an import +// cycle between this codec package and the desc package. We export it from +// this package, but we can't use a type alias because we also need to add +// methods to it, to broaden the exposed API. + +// Buffer is a reader and a writer that wraps a slice of bytes and also +// provides API for decoding and encoding the protobuf binary format. +// +// Its operation is similar to that of a bytes.Buffer: writing pushes +// data to the end of the buffer while reading pops data from the head +// of the buffer. So the same buffer can be used to both read and write. +type Buffer codec.Buffer + +// NewBuffer creates a new buffer with the given slice of bytes as the +// buffer's initial contents. +func NewBuffer(buf []byte) *Buffer { + return (*Buffer)(codec.NewBuffer(buf)) +} + +// SetDeterministic sets this buffer to encode messages deterministically. This +// is useful for tests. But the overhead is non-zero, so it should not likely be +// used outside of tests. When true, map fields in a message must have their +// keys sorted before serialization to ensure deterministic output. Otherwise, +// values in a map field will be serialized in map iteration order. +func (cb *Buffer) SetDeterministic(deterministic bool) { + (*codec.Buffer)(cb).SetDeterministic(deterministic) +} + +// IsDeterministic returns whether or not this buffer is configured to encode +// messages deterministically. +func (cb *Buffer) IsDeterministic() bool { + return (*codec.Buffer)(cb).IsDeterministic() +} + +// Reset resets this buffer back to empty. Any subsequent writes/encodes +// to the buffer will allocate a new backing slice of bytes. 
+func (cb *Buffer) Reset() { + (*codec.Buffer)(cb).Reset() +} + +// Bytes returns the slice of bytes remaining in the buffer. Note that +// this does not perform a copy: if the contents of the returned slice +// are modified, the modifications will be visible to subsequent reads +// via the buffer. +func (cb *Buffer) Bytes() []byte { + return (*codec.Buffer)(cb).Bytes() +} + +// String returns the remaining bytes in the buffer as a string. +func (cb *Buffer) String() string { + return (*codec.Buffer)(cb).String() +} + +// EOF returns true if there are no more bytes remaining to read. +func (cb *Buffer) EOF() bool { + return (*codec.Buffer)(cb).EOF() +} + +// Skip attempts to skip the given number of bytes in the input. If +// the input has fewer bytes than the given count, io.ErrUnexpectedEOF +// is returned and the buffer is unchanged. Otherwise, the given number +// of bytes are skipped and nil is returned. +func (cb *Buffer) Skip(count int) error { + return (*codec.Buffer)(cb).Skip(count) + +} + +// Len returns the remaining number of bytes in the buffer. +func (cb *Buffer) Len() int { + return (*codec.Buffer)(cb).Len() +} + +// Read implements the io.Reader interface. If there are no bytes +// remaining in the buffer, it will return 0, io.EOF. Otherwise, +// it reads max(len(dest), cb.Len()) bytes from input and copies +// them into dest. It returns the number of bytes copied and a nil +// error in this case. +func (cb *Buffer) Read(dest []byte) (int, error) { + return (*codec.Buffer)(cb).Read(dest) +} + +var _ io.Reader = (*Buffer)(nil) + +// Write implements the io.Writer interface. It always returns +// len(data), nil. +func (cb *Buffer) Write(data []byte) (int, error) { + return (*codec.Buffer)(cb).Write(data) +} + +var _ io.Writer = (*Buffer)(nil) + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (cb *Buffer) DecodeVarint() (uint64, error) { + return (*codec.Buffer)(cb).DecodeVarint() +} + +// DecodeTagAndWireType decodes a field tag and wire type from input. +// This reads a varint and then extracts the two fields from the varint +// value read. +func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) { + return (*codec.Buffer)(cb).DecodeTagAndWireType() +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) DecodeFixed64() (x uint64, err error) { + return (*codec.Buffer)(cb).DecodeFixed64() +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) DecodeFixed32() (x uint64, err error) { + return (*codec.Buffer)(cb).DecodeFixed32() +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + return (*codec.Buffer)(cb).DecodeRawBytes(alloc) +} + +// ReadGroup reads the input until a "group end" tag is found +// and returns the data up to that point. Subsequent reads from +// the buffer will read data after the group end tag. If alloc +// is true, the data is copied to a new slice before being returned. +// Otherwise, the returned slice is a view into the buffer's +// underlying byte slice. 
+// +// This function correctly handles nested groups: if a "group start" +// tag is found, then that group's end tag will be included in the +// returned data. +func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) { + return (*codec.Buffer)(cb).ReadGroup(alloc) +} + +// SkipGroup is like ReadGroup, except that it discards the +// data and just advances the buffer to point to the input +// right *after* the "group end" tag. +func (cb *Buffer) SkipGroup() error { + return (*codec.Buffer)(cb).SkipGroup() +} + +// SkipField attempts to skip the value of a field with the given wire +// type. When consuming a protobuf-encoded stream, it can be called immediately +// after DecodeTagAndWireType to discard the subsequent data for the field. +func (cb *Buffer) SkipField(wireType int8) error { + return (*codec.Buffer)(cb).SkipField(wireType) +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (cb *Buffer) EncodeVarint(x uint64) error { + return (*codec.Buffer)(cb).EncodeVarint(x) +} + +// EncodeTagAndWireType encodes the given field tag and wire type to the +// buffer. This combines the two values and then writes them as a varint. +func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error { + return (*codec.Buffer)(cb).EncodeTagAndWireType(tag, wireType) +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) EncodeFixed64(x uint64) error { + return (*codec.Buffer)(cb).EncodeFixed64(x) + +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) EncodeFixed32(x uint64) error { + return (*codec.Buffer)(cb).EncodeFixed32(x) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (cb *Buffer) EncodeRawBytes(b []byte) error { + return (*codec.Buffer)(cb).EncodeRawBytes(b) +} + +// EncodeMessage writes the given message to the buffer. +func (cb *Buffer) EncodeMessage(pm proto.Message) error { + return (*codec.Buffer)(cb).EncodeMessage(pm) +} + +// EncodeDelimitedMessage writes the given message to the buffer with a +// varint-encoded length prefix (the delimiter). 
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error { + return (*codec.Buffer)(cb).EncodeDelimitedMessage(pm) +} diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go new file mode 100644 index 00000000..0edb817c --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go @@ -0,0 +1,318 @@ +package codec + +import ( + "errors" + "fmt" + "io" + "math" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc" +) + +var varintTypes = map[descriptorpb.FieldDescriptorProto_Type]bool{} +var fixed32Types = map[descriptorpb.FieldDescriptorProto_Type]bool{} +var fixed64Types = map[descriptorpb.FieldDescriptorProto_Type]bool{} + +func init() { + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_BOOL] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT32] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT64] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT32] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT64] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT32] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT64] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_ENUM] = true + + fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED32] = true + fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED32] = true + fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FLOAT] = true + + fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED64] = true + fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED64] = true + fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_DOUBLE] = true +} + +// ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type +// it reads indicates an end-group marker. +var ErrWireTypeEndGroup = errors.New("unexpected wire type: end group") + +// MessageFactory is used to instantiate messages when DecodeFieldValue needs to +// decode a message value. +// +// Also see MessageFactory in "github.com/jhump/protoreflect/dynamic", which +// implements this interface. +type MessageFactory interface { + NewMessage(md *desc.MessageDescriptor) proto.Message +} + +// UnknownField represents a field that was parsed from the binary wire +// format for a message, but was not a recognized field number. Enough +// information is preserved so that re-serializing the message won't lose +// any of the unrecognized data. +type UnknownField struct { + // The tag number for the unrecognized field. + Tag int32 + + // Encoding indicates how the unknown field was encoded on the wire. If it + // is proto.WireBytes or proto.WireGroupStart then Contents will be set to + // the raw bytes. If it is proto.WireTypeFixed32 then the data is in the least + // significant 32 bits of Value. Otherwise, the data is in all 64 bits of + // Value. + Encoding int8 + Contents []byte + Value uint64 +} + +// DecodeZigZag32 decodes a signed 32-bit integer from the given +// zig-zag encoded value. +func DecodeZigZag32(v uint64) int32 { + return int32((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)) +} + +// DecodeZigZag64 decodes a signed 64-bit integer from the given +// zig-zag encoded value. +func DecodeZigZag64(v uint64) int64 { + return int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63)) +} + +// DecodeFieldValue will read a field value from the buffer and return its +// value and the corresponding field descriptor. 
The given function is used +// to lookup a field descriptor by tag number. The given factory is used to +// instantiate a message if the field value is (or contains) a message value. +// +// On error, the field descriptor and value are typically nil. However, if the +// error returned is ErrWireTypeEndGroup, the returned value will indicate any +// tag number encoded in the end-group marker. +// +// If the field descriptor returned is nil, that means that the given function +// returned nil. This is expected to happen for unrecognized tag numbers. In +// that case, no error is returned, and the value will be an UnknownField. +func (cb *Buffer) DecodeFieldValue(fieldFinder func(int32) *desc.FieldDescriptor, fact MessageFactory) (*desc.FieldDescriptor, interface{}, error) { + if cb.EOF() { + return nil, nil, io.EOF + } + tagNumber, wireType, err := cb.DecodeTagAndWireType() + if err != nil { + return nil, nil, err + } + if wireType == proto.WireEndGroup { + return nil, tagNumber, ErrWireTypeEndGroup + } + fd := fieldFinder(tagNumber) + if fd == nil { + val, err := cb.decodeUnknownField(tagNumber, wireType) + return nil, val, err + } + val, err := cb.decodeKnownField(fd, wireType, fact) + return fd, val, err +} + +// DecodeScalarField extracts a properly-typed value from v. The returned value's +// type depends on the given field descriptor type. It will be the same type as +// generated structs use for the field descriptor's type. Enum types will return +// an int32. If the given field type uses length-delimited encoding (nested +// messages, bytes, and strings), an error is returned. +func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) { + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + return v != 0, nil + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return uint32(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_ENUM: + s := int64(v) + if s > math.MaxInt32 || s < math.MinInt32 { + return nil, ErrOverflow + } + return int32(s), nil + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return int32(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_SINT32: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return DecodeZigZag32(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + return v, nil + + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + return int64(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_SINT64: + return DecodeZigZag64(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return math.Float32frombits(uint32(v)), nil + + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return math.Float64frombits(v), nil + + default: + // bytes, string, message, and group cannot be represented as a simple numeric value + return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName()) + } +} + +// DecodeLengthDelimitedField extracts a properly-typed value from bytes. The +// returned value's type will usually be []byte, string, or, for nested messages, +// the type returned from the given message factory. 
However, since repeated
+// scalar fields can be length-delimited, when they use packed encoding, it can
+// also return a []interface{}, where each element is a scalar value. Furthermore,
+// it could return a scalar type, not in a slice, if the given field descriptor is
+// not repeated. This is to support cases where a field is changed from optional
+// to repeated. New code may emit a packed repeated representation, but old code
+// still expects a single scalar value. In this case, if the actual data in bytes
+// contains multiple values, only the last value is returned.
+func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
+	switch {
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES:
+		return bytes, nil
+
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_STRING:
+		return string(bytes), nil
+
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE ||
+		fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP:
+		msg := mf.NewMessage(fd.GetMessageType())
+		err := proto.Unmarshal(bytes, msg)
+		if err != nil {
+			return nil, err
+		} else {
+			return msg, nil
+		}
+
+	default:
+		// even if the field is not repeated or not packed, we still parse it as such for
+		// backwards compatibility (e.g. message we are de-serializing could have been both
+		// repeated and packed at the time of serialization)
+		packedBuf := NewBuffer(bytes)
+		var slice []interface{}
+		var val interface{}
+		for !packedBuf.EOF() {
+			var v uint64
+			var err error
+			if varintTypes[fd.GetType()] {
+				v, err = packedBuf.DecodeVarint()
+			} else if fixed32Types[fd.GetType()] {
+				v, err = packedBuf.DecodeFixed32()
+			} else if fixed64Types[fd.GetType()] {
+				v, err = packedBuf.DecodeFixed64()
+			} else {
+				return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName())
+			}
+			if err != nil {
+				return nil, err
+			}
+			val, err = DecodeScalarField(fd, v)
+			if err != nil {
+				return nil, err
+			}
+			if fd.IsRepeated() {
+				slice = append(slice, val)
+			}
+		}
+		if fd.IsRepeated() {
+			return slice, nil
+		} else {
+			// if not a repeated field, last value wins
+			return val, nil
+		}
+	}
+}
+
+func (b *Buffer) decodeKnownField(fd *desc.FieldDescriptor, encoding int8, fact MessageFactory) (interface{}, error) {
+	var val interface{}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		var num uint64
+		num, err = b.DecodeFixed32()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+	case proto.WireFixed64:
+		var num uint64
+		num, err = b.DecodeFixed64()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+	case proto.WireVarint:
+		var num uint64
+		num, err = b.DecodeVarint()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+
+	case proto.WireBytes:
+		alloc := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES
+		var raw []byte
+		raw, err = b.DecodeRawBytes(alloc)
+		if err == nil {
+			val, err = DecodeLengthDelimitedField(fd, raw, fact)
+		}
+
+	case proto.WireStartGroup:
+		if fd.GetMessageType() == nil {
+			return nil, fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName())
+		}
+		msg := fact.NewMessage(fd.GetMessageType())
+		var data []byte
+		data, err = b.ReadGroup(false)
+		if err == nil {
+			err = proto.Unmarshal(data, msg)
+			if err == nil {
+				val = msg
+			}
+		}
+
+	default:
+		return nil, ErrBadWireType
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return val, nil
+}
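+
+// An illustrative sketch (hypothetical caller code, not part of this
+// package): DecodeFieldValue is typically driven in a loop until io.EOF,
+// with a nil returned descriptor signaling an unrecognized field:
+//
+//	buf := NewBuffer(data)
+//	for {
+//		fd, val, err := buf.DecodeFieldValue(md.FindFieldByNumber, mf)
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		if fd == nil {
+//			// val is an UnknownField preserving the raw data
+//			continue
+//		}
+//		// use fd and val
+//	}
+
+// decodeUnknownField reads the value of an unrecognized field, preserving
+// the tag number, wire type, and raw data so the field can later be
+// re-serialized without loss.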
+func (b *Buffer) decodeUnknownField(tagNumber int32, encoding int8) (interface{}, error) {
+	u := UnknownField{Tag: tagNumber, Encoding: encoding}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		u.Value, err = b.DecodeFixed32()
+	case proto.WireFixed64:
+		u.Value, err = b.DecodeFixed64()
+	case proto.WireVarint:
+		u.Value, err = b.DecodeVarint()
+	case proto.WireBytes:
+		u.Contents, err = b.DecodeRawBytes(true)
+	case proto.WireStartGroup:
+		u.Contents, err = b.ReadGroup(true)
+	default:
+		err = ErrBadWireType
+	}
+	if err != nil {
+		return nil, err
+	}
+	return u, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/codec/doc.go b/vendor/github.com/jhump/protoreflect/codec/doc.go
new file mode 100644
index 00000000..f76499f6
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/doc.go
@@ -0,0 +1,7 @@
+// Package codec contains a reader/writer type that assists with encoding
+// and decoding protobuf's binary representation.
+//
+// The code in this package began as a fork of proto.Buffer but provides
+// additional API to make it more useful to code that needs to dynamically
+// process or produce the protobuf binary format.
+package codec
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
new file mode 100644
index 00000000..280f730f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -0,0 +1,288 @@
+package codec
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// EncodeZigZag64 does zig-zag encoding to convert the given
+// signed 64-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
+func EncodeZigZag64(v int64) uint64 {
+	return (uint64(v) << 1) ^ uint64(v>>63)
+}
+
+// EncodeZigZag32 does zig-zag encoding to convert the given
+// signed 32-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
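+//
+// For illustration, values near zero map to small unsigned results
+// regardless of sign, which is what makes the subsequent varint
+// encoding compact:
+//
+//	EncodeZigZag32(0)  == 0
+//	EncodeZigZag32(-1) == 1
+//	EncodeZigZag32(1)  == 2
+//	EncodeZigZag32(-2) == 3
+//
+// DecodeZigZag32 in decode_fields.go inverts this mapping.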
+func EncodeZigZag32(v int32) uint64 { + return uint64((uint32(v) << 1) ^ uint32((v >> 31))) +} + +func (cb *Buffer) EncodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error { + if fd.IsMap() { + mp := val.(map[interface{}]interface{}) + entryType := fd.GetMessageType() + keyType := entryType.FindFieldByNumber(1) + valType := entryType.FindFieldByNumber(2) + var entryBuffer Buffer + if cb.IsDeterministic() { + entryBuffer.SetDeterministic(true) + keys := make([]interface{}, 0, len(mp)) + for k := range mp { + keys = append(keys, k) + } + sort.Sort(sortable(keys)) + for _, k := range keys { + v := mp[k] + entryBuffer.Reset() + if err := entryBuffer.encodeFieldElement(keyType, k); err != nil { + return err + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || !rv.IsNil() { + if err := entryBuffer.encodeFieldElement(valType, v); err != nil { + return err + } + } + if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil { + return err + } + if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil { + return err + } + } + } else { + for k, v := range mp { + entryBuffer.Reset() + if err := entryBuffer.encodeFieldElement(keyType, k); err != nil { + return err + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || !rv.IsNil() { + if err := entryBuffer.encodeFieldElement(valType, v); err != nil { + return err + } + } + if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil { + return err + } + if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil { + return err + } + } + } + return nil + } else if fd.IsRepeated() { + sl := val.([]interface{}) + wt, err := getWireType(fd.GetType()) + if err != nil { + return err + } + if isPacked(fd) && len(sl) > 0 && + (wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) { + // packed repeated field + var packedBuffer Buffer + for _, v := range sl { + if err := packedBuffer.encodeFieldValue(fd, v); err != nil { + return err + } + } + if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil { + return err + } + return cb.EncodeRawBytes(packedBuffer.Bytes()) + } else { + // non-packed repeated field + for _, v := range sl { + if err := cb.encodeFieldElement(fd, v); err != nil { + return err + } + } + return nil + } + } else { + return cb.encodeFieldElement(fd, val) + } +} + +func isPacked(fd *desc.FieldDescriptor) bool { + opts := fd.AsFieldDescriptorProto().GetOptions() + // if set, use that value + if opts != nil && opts.Packed != nil { + return opts.GetPacked() + } + // if unset: proto2 defaults to false, proto3 to true + return fd.GetFile().IsProto3() +} + +// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64), +// bools, or strings. 
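+// Sorting matters only in deterministic mode: iteration order over a Go map
+// is randomized, so EncodeFieldValue sorts map keys first to produce a
+// stable, reproducible byte stream (useful, for example, when serialized
+// messages are compared or hashed).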
+type sortable []interface{} + +func (s sortable) Len() int { + return len(s) +} + +func (s sortable) Less(i, j int) bool { + vi := s[i] + vj := s[j] + switch reflect.TypeOf(vi).Kind() { + case reflect.Int32: + return vi.(int32) < vj.(int32) + case reflect.Int64: + return vi.(int64) < vj.(int64) + case reflect.Uint32: + return vi.(uint32) < vj.(uint32) + case reflect.Uint64: + return vi.(uint64) < vj.(uint64) + case reflect.String: + return vi.(string) < vj.(string) + case reflect.Bool: + return !vi.(bool) && vj.(bool) + default: + panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi))) + } +} + +func (s sortable) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (b *Buffer) encodeFieldElement(fd *desc.FieldDescriptor, val interface{}) error { + wt, err := getWireType(fd.GetType()) + if err != nil { + return err + } + if err := b.EncodeTagAndWireType(fd.GetNumber(), wt); err != nil { + return err + } + if err := b.encodeFieldValue(fd, val); err != nil { + return err + } + if wt == proto.WireStartGroup { + return b.EncodeTagAndWireType(fd.GetNumber(), proto.WireEndGroup) + } + return nil +} + +func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error { + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + v := val.(bool) + if v { + return b.EncodeVarint(1) + } + return b.EncodeVarint(0) + + case descriptorpb.FieldDescriptorProto_TYPE_ENUM, + descriptorpb.FieldDescriptorProto_TYPE_INT32: + v := val.(int32) + return b.EncodeVarint(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + v := val.(int32) + return b.EncodeFixed32(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SINT32: + v := val.(int32) + return b.EncodeVarint(EncodeZigZag32(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_UINT32: + v := val.(uint32) + return b.EncodeVarint(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + v := val.(uint32) + return b.EncodeFixed32(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_INT64: + v := val.(int64) + return b.EncodeVarint(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + v := val.(int64) + return b.EncodeFixed64(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SINT64: + v := val.(int64) + return b.EncodeVarint(EncodeZigZag64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_UINT64: + v := val.(uint64) + return b.EncodeVarint(v) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + v := val.(uint64) + return b.EncodeFixed64(v) + + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + v := val.(float64) + return b.EncodeFixed64(math.Float64bits(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + v := val.(float32) + return b.EncodeFixed32(uint64(math.Float32bits(v))) + + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + v := val.([]byte) + return b.EncodeRawBytes(v) + + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + v := val.(string) + return b.EncodeRawBytes(([]byte)(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE: + return b.EncodeDelimitedMessage(val.(proto.Message)) + + case descriptorpb.FieldDescriptorProto_TYPE_GROUP: + // just append the nested message to this buffer + return b.EncodeMessage(val.(proto.Message)) + // whosoever writeth start-group tag (e.g. 
caller) is responsible for writing end-group tag
+
+	default:
+		return fmt.Errorf("unrecognized field type: %v", fd.GetType())
+	}
+}
+
+func getWireType(t descriptorpb.FieldDescriptorProto_Type) (int8, error) {
+	switch t {
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
+		descriptorpb.FieldDescriptorProto_TYPE_BOOL,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
+		return proto.WireVarint, nil
+
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
+		return proto.WireFixed32, nil
+
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
+		return proto.WireFixed64, nil
+
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES,
+		descriptorpb.FieldDescriptorProto_TYPE_STRING,
+		descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
+		return proto.WireBytes, nil
+
+	case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
+		return proto.WireStartGroup, nil
+
+	default:
+		return 0, ErrBadWireType
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/cache.go b/vendor/github.com/jhump/protoreflect/desc/cache.go
new file mode 100644
index 00000000..e67cf494
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/cache.go
@@ -0,0 +1,57 @@
+package desc
+
+import (
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type descriptorCache interface {
+	get(protoreflect.Descriptor) Descriptor
+	put(protoreflect.Descriptor, Descriptor)
+}
+
+type lockingCache struct {
+	cacheMu sync.RWMutex
+	cache   mapCache
+}
+
+func (c *lockingCache) get(d protoreflect.Descriptor) Descriptor {
+	c.cacheMu.RLock()
+	defer c.cacheMu.RUnlock()
+	return c.cache.get(d)
+}
+
+func (c *lockingCache) put(key protoreflect.Descriptor, val Descriptor) {
+	c.cacheMu.Lock()
+	defer c.cacheMu.Unlock()
+	c.cache.put(key, val)
+}
+
+func (c *lockingCache) withLock(fn func(descriptorCache)) {
+	c.cacheMu.Lock()
+	defer c.cacheMu.Unlock()
+	// Pass the underlying mapCache. We don't want fn to use
+	// c.get or c.put since we already have the lock; those
+	// methods would try to re-acquire it and then deadlock!
+ fn(c.cache) +} + +type mapCache map[protoreflect.Descriptor]Descriptor + +func (c mapCache) get(d protoreflect.Descriptor) Descriptor { + return c[d] +} + +func (c mapCache) put(key protoreflect.Descriptor, val Descriptor) { + c[key] = val +} + +type noopCache struct{} + +func (noopCache) get(protoreflect.Descriptor) Descriptor { + return nil +} + +func (noopCache) put(protoreflect.Descriptor, Descriptor) { +} diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go new file mode 100644 index 00000000..9aa72a32 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/convert.go @@ -0,0 +1,296 @@ +package desc + +import ( + "errors" + "fmt" + "strings" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/internal" + intn "github.com/jhump/protoreflect/internal" +) + +// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto. +// The file's direct dependencies must be provided. If the given dependencies do not include +// all of the file's dependencies or if the contents of the descriptors are internally +// inconsistent (e.g. contain unresolvable symbols) then an error is returned. +func CreateFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) { + return createFileDescriptor(fd, deps, nil) +} + +type descResolver struct { + files []*FileDescriptor + importResolver *ImportResolver + fromPath string +} + +func (r *descResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + resolvedPath := r.importResolver.ResolveImport(r.fromPath, path) + d := r.findFileByPath(resolvedPath) + if d != nil { + return d, nil + } + if resolvedPath != path { + d := r.findFileByPath(path) + if d != nil { + return d, nil + } + } + return nil, protoregistry.NotFound +} + +func (r *descResolver) findFileByPath(path string) protoreflect.FileDescriptor { + for _, fd := range r.files { + if fd.GetName() == path { + return fd.UnwrapFile() + } + } + return nil +} + +func (r *descResolver) FindDescriptorByName(n protoreflect.FullName) (protoreflect.Descriptor, error) { + for _, fd := range r.files { + d := fd.FindSymbol(string(n)) + if d != nil { + return d.(DescriptorWrapper).Unwrap(), nil + } + } + return nil, protoregistry.NotFound +} + +func createFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) { + dr := &descResolver{files: deps, importResolver: r, fromPath: fd.GetName()} + d, err := protodesc.NewFile(fd, dr) + if err != nil { + return nil, err + } + + // make sure cache has dependencies populated + cache := mapCache{} + for _, dep := range deps { + fd, err := dr.FindFileByPath(dep.GetName()) + if err != nil { + return nil, err + } + cache.put(fd, dep) + } + + return convertFile(d, fd, cache) +} + +func convertFile(d protoreflect.FileDescriptor, fd *descriptorpb.FileDescriptorProto, cache descriptorCache) (*FileDescriptor, error) { + ret := &FileDescriptor{ + wrapped: d, + proto: fd, + symbols: map[string]Descriptor{}, + fieldIndex: map[string]map[int32]*FieldDescriptor{}, + } + cache.put(d, ret) + + // populate references to file descriptor dependencies + ret.deps = make([]*FileDescriptor, len(fd.GetDependency())) + for i := 0; i < d.Imports().Len(); i++ { + f := 
d.Imports().Get(i).FileDescriptor + if c := cache.get(f); c != nil { + ret.deps[i] = c.(*FileDescriptor) + } else if c, err := wrapFile(f, cache); err != nil { + return nil, err + } else { + ret.deps[i] = c + } + } + ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency())) + for i, pd := range fd.GetPublicDependency() { + ret.publicDeps[i] = ret.deps[pd] + } + ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency())) + for i, wd := range fd.GetWeakDependency() { + ret.weakDeps[i] = ret.deps[wd] + } + + // populate all tables of child descriptors + path := make([]int32, 1, 8) + path[0] = internal.File_messagesTag + for i := 0; i < d.Messages().Len(); i++ { + src := d.Messages().Get(i) + srcProto := fd.GetMessageType()[src.Index()] + md := createMessageDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i))) + ret.symbols[string(src.FullName())] = md + ret.messages = append(ret.messages, md) + } + path[0] = internal.File_enumsTag + for i := 0; i < d.Enums().Len(); i++ { + src := d.Enums().Get(i) + srcProto := fd.GetEnumType()[src.Index()] + ed := createEnumDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i))) + ret.symbols[string(src.FullName())] = ed + ret.enums = append(ret.enums, ed) + } + path[0] = internal.File_extensionsTag + for i := 0; i < d.Extensions().Len(); i++ { + src := d.Extensions().Get(i) + srcProto := fd.GetExtension()[src.Index()] + exd := createFieldDescriptor(ret, ret, src, srcProto, cache, append(path, int32(i))) + ret.symbols[string(src.FullName())] = exd + ret.extensions = append(ret.extensions, exd) + } + path[0] = internal.File_servicesTag + for i := 0; i < d.Services().Len(); i++ { + src := d.Services().Get(i) + srcProto := fd.GetService()[src.Index()] + sd := createServiceDescriptor(ret, src, srcProto, ret.symbols, append(path, int32(i))) + ret.symbols[string(src.FullName())] = sd + ret.services = append(ret.services, sd) + } + + ret.sourceInfo = internal.CreateSourceInfoMap(fd) + ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo + + // now we can resolve all type references and source code info + for _, md := range ret.messages { + if err := md.resolve(cache); err != nil { + return nil, err + } + } + path[0] = internal.File_extensionsTag + for _, exd := range ret.extensions { + if err := exd.resolve(cache); err != nil { + return nil, err + } + } + path[0] = internal.File_servicesTag + for _, sd := range ret.services { + if err := sd.resolve(cache); err != nil { + return nil, err + } + } + + return ret, nil +} + +// CreateFileDescriptors constructs a set of descriptors, one for each of the +// given descriptor protos. The given set of descriptor protos must include all +// transitive dependencies for every file. 
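+// The returned map is keyed by file path. For illustration, a set containing
+// protos for "a.proto" (which imports "b.proto") and "b.proto" yields a map
+// with both entries, where the descriptor for "a.proto" references the
+// descriptor for "b.proto" as a dependency.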
+func CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+	return createFileDescriptors(fds, nil)
+}
+
+func createFileDescriptors(fds []*descriptorpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+	if len(fds) == 0 {
+		return nil, nil
+	}
+	files := map[string]*descriptorpb.FileDescriptorProto{}
+	resolved := map[string]*FileDescriptor{}
+	var name string
+	for _, fd := range fds {
+		name = fd.GetName()
+		files[name] = fd
+	}
+	for _, fd := range fds {
+		_, err := createFromSet(fd.GetName(), r, nil, files, resolved)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return resolved, nil
+}
+
+// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
+// file descriptors and their transitive dependencies. The files are topologically sorted
+// so that a file will always appear after its dependencies.
+func ToFileDescriptorSet(fds ...*FileDescriptor) *descriptorpb.FileDescriptorSet {
+	var fdps []*descriptorpb.FileDescriptorProto
+	addAllFiles(fds, &fdps, map[string]struct{}{})
+	return &descriptorpb.FileDescriptorSet{File: fdps}
+}
+
+func addAllFiles(src []*FileDescriptor, results *[]*descriptorpb.FileDescriptorProto, seen map[string]struct{}) {
+	for _, fd := range src {
+		if _, ok := seen[fd.GetName()]; ok {
+			continue
+		}
+		seen[fd.GetName()] = struct{}{}
+		addAllFiles(fd.GetDependencies(), results, seen)
+		*results = append(*results, fd.AsFileDescriptorProto())
+	}
+}
+
+// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The
+// set's *last* file will be the returned descriptor. The set's remaining files must comprise
+// the full set of transitive dependencies of that last file. This is the same format and
+// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
+//
+//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
+	return createFileDescriptorFromSet(fds, nil)
+}
+
+func createFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+	result, err := createFileDescriptorsFromSet(fds, r)
+	if err != nil {
+		return nil, err
+	}
+	files := fds.GetFile()
+	lastFilename := files[len(files)-1].GetName()
+	return result[lastFilename], nil
+}
+
+// CreateFileDescriptorsFromSet creates file descriptors from the given file descriptor set.
+// The returned map includes all files in the set, keyed by name. The set must include the
+// full set of transitive dependencies for all files therein or else a link error will occur
+// and be returned instead of the map of descriptors. This is the same format used by
+// protoc when emitting a FileDescriptorSet file with an invocation like so:
+//
+//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+	return createFileDescriptorsFromSet(fds, nil)
+}
+
+func createFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
+	files := fds.GetFile()
+	if len(files) == 0 {
+		return nil, errors.New("file descriptor set is empty")
+	}
+	return createFileDescriptors(files, r)
+}
+
+// createFromSet creates a descriptor for the given filename. It recursively
+// creates descriptors for the given file's dependencies.
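+// Already-resolved files are memoized in the resolved map, and the seen slice
+// tracks the current import chain so that an import cycle such as
+// "a.proto -> b.proto -> a.proto" is reported as an error rather than
+// recursing forever.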
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*descriptorpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) { + for _, s := range seen { + if filename == s { + return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> ")) + } + } + seen = append(seen, filename) + + if d, ok := resolved[filename]; ok { + return d, nil + } + fdp := files[filename] + if fdp == nil { + return nil, intn.ErrNoSuchFile(filename) + } + deps := make([]*FileDescriptor, len(fdp.GetDependency())) + for i, depName := range fdp.GetDependency() { + resolvedDep := r.ResolveImport(filename, depName) + dep, err := createFromSet(resolvedDep, r, seen, files, resolved) + if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName { + dep, err = createFromSet(depName, r, seen, files, resolved) + } + if err != nil { + return nil, err + } + deps[i] = dep + } + d, err := createFileDescriptor(fdp, deps, r) + if err != nil { + return nil, err + } + resolved[filename] = d + return d, nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go new file mode 100644 index 00000000..6903a3ab --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go @@ -0,0 +1,1835 @@ +package desc + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/internal" +) + +// Descriptor is the common interface implemented by all descriptor objects. +type Descriptor interface { + // GetName returns the name of the object described by the descriptor. This will + // be a base name that does not include enclosing message names or the package name. + // For file descriptors, this indicates the path and name to the described file. + GetName() string + // GetFullyQualifiedName returns the fully-qualified name of the object described by + // the descriptor. This will include the package name and any enclosing message names. + // For file descriptors, this returns the path and name to the described file (same as + // GetName). + GetFullyQualifiedName() string + // GetParent returns the enclosing element in a proto source file. If the described + // object is a top-level object, this returns the file descriptor. Otherwise, it returns + // the element in which the described object was declared. File descriptors have no + // parent and return nil. + GetParent() Descriptor + // GetFile returns the file descriptor in which this element was declared. File + // descriptors return themselves. + GetFile() *FileDescriptor + // GetOptions returns the options proto containing options for the described element. + GetOptions() proto.Message + // GetSourceInfo returns any source code information that was present in the file + // descriptor. Source code info is optional. If no source code info is available for + // the element (including if there is none at all in the file descriptor) then this + // returns nil + GetSourceInfo() *descriptorpb.SourceCodeInfo_Location + // AsProto returns the underlying descriptor proto for this descriptor. + AsProto() proto.Message +} + +type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc + +// FileDescriptor describes a proto source file. 
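+// It provides access to the file's package, its dependencies, and all
+// top-level messages, enums, extensions, and services declared within it,
+// and it can look up any symbol defined in the file via FindSymbol.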
+type FileDescriptor struct {
+	wrapped    protoreflect.FileDescriptor
+	proto      *descriptorpb.FileDescriptorProto
+	symbols    map[string]Descriptor
+	deps       []*FileDescriptor
+	publicDeps []*FileDescriptor
+	weakDeps   []*FileDescriptor
+	messages   []*MessageDescriptor
+	enums      []*EnumDescriptor
+	extensions []*FieldDescriptor
+	services   []*ServiceDescriptor
+	fieldIndex map[string]map[int32]*FieldDescriptor
+	sourceInfo internal.SourceInfoMap
+	sourceInfoRecomputeFunc
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapFile, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FileDescriptor) Unwrap() protoreflect.Descriptor {
+	return fd.wrapped
+}
+
+// UnwrapFile returns the underlying protoreflect.FileDescriptor.
+func (fd *FileDescriptor) UnwrapFile() protoreflect.FileDescriptor {
+	return fd.wrapped
+}
+
+func (fd *FileDescriptor) recomputeSourceInfo() {
+	internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
+}
+
+func (fd *FileDescriptor) registerField(field *FieldDescriptor) {
+	fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()]
+	if fields == nil {
+		fields = map[int32]*FieldDescriptor{}
+		fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields
+	}
+	fields[field.GetNumber()] = field
+}
+
+// GetName returns the name of the file, as it was given to the protoc invocation
+// to compile it, possibly including path (relative to a directory in the proto
+// import path).
+func (fd *FileDescriptor) GetName() string {
+	return fd.wrapped.Path()
+}
+
+// GetFullyQualifiedName returns the name of the file, same as GetName. It is
+// present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFullyQualifiedName() string {
+	return fd.wrapped.Path()
+}
+
+// GetPackage returns the name of the package declared in the file.
+func (fd *FileDescriptor) GetPackage() string {
+	return string(fd.wrapped.Package())
+}
+
+// GetParent always returns nil: files are the root of descriptor hierarchies.
+// It is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetParent() Descriptor {
+	return nil
+}
+
+// GetFile returns the receiver, which is a file descriptor. This is present
+// to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFile() *FileDescriptor {
+	return fd
+}
+
+// GetOptions returns the file's options. Most usages will be more interested
+// in GetFileOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFileOptions returns the file's options.
+func (fd *FileDescriptor) GetFileOptions() *descriptorpb.FileOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
+// interface.
+func (fd *FileDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+	return nil
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFileDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFileDescriptorProto returns the underlying descriptor proto.
+func (fd *FileDescriptor) AsFileDescriptorProto() *descriptorpb.FileDescriptorProto { + return fd.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (fd *FileDescriptor) String() string { + return fd.proto.String() +} + +// IsProto3 returns true if the file declares a syntax of "proto3". +func (fd *FileDescriptor) IsProto3() bool { + return fd.wrapped.Syntax() == protoreflect.Proto3 +} + +// GetDependencies returns all of this file's dependencies. These correspond to +// import statements in the file. +func (fd *FileDescriptor) GetDependencies() []*FileDescriptor { + return fd.deps +} + +// GetPublicDependencies returns all of this file's public dependencies. These +// correspond to public import statements in the file. +func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor { + return fd.publicDeps +} + +// GetWeakDependencies returns all of this file's weak dependencies. These +// correspond to weak import statements in the file. +func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor { + return fd.weakDeps +} + +// GetMessageTypes returns all top-level messages declared in this file. +func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor { + return fd.messages +} + +// GetEnumTypes returns all top-level enums declared in this file. +func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor { + return fd.enums +} + +// GetExtensions returns all top-level extensions declared in this file. +func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor { + return fd.extensions +} + +// GetServices returns all services declared in this file. +func (fd *FileDescriptor) GetServices() []*ServiceDescriptor { + return fd.services +} + +// FindSymbol returns the descriptor contained within this file for the +// element with the given fully-qualified symbol name. If no such element +// exists then this method returns nil. +func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor { + if len(symbol) == 0 { + return nil + } + if symbol[0] == '.' { + symbol = symbol[1:] + } + if ret := fd.symbols[symbol]; ret != nil { + return ret + } + + // allow accessing symbols through public imports, too + for _, dep := range fd.GetPublicDependencies() { + if ret := dep.FindSymbol(symbol); ret != nil { + return ret + } + } + + // not found + return nil +} + +// FindMessage finds the message with the given fully-qualified name. If no +// such element exists in this file then nil is returned. +func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor { + if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok { + return md + } else { + return nil + } +} + +// FindEnum finds the enum with the given fully-qualified name. If no such +// element exists in this file then nil is returned. +func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor { + if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok { + return ed + } else { + return nil + } +} + +// FindService finds the service with the given fully-qualified name. If no +// such element exists in this file then nil is returned. +func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor { + if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok { + return sd + } else { + return nil + } +} + +// FindExtension finds the extension field for the given extended type name and +// tag number. If no such element exists in this file then nil is returned. 
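+// The extendee name must be fully-qualified. For illustration, a hypothetical
+// extension declared as
+//
+//	extend google.protobuf.MessageOptions { string foo = 50000; }
+//
+// would be located via FindExtension("google.protobuf.MessageOptions", 50000).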
+func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor { + if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() { + return exd + } else { + return nil + } +} + +// FindExtensionByName finds the extension field with the given fully-qualified +// name. If no such element exists in this file then nil is returned. +func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor { + if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() { + return exd + } else { + return nil + } +} + +// MessageDescriptor describes a protocol buffer message. +type MessageDescriptor struct { + wrapped protoreflect.MessageDescriptor + proto *descriptorpb.DescriptorProto + parent Descriptor + file *FileDescriptor + fields []*FieldDescriptor + nested []*MessageDescriptor + enums []*EnumDescriptor + extensions []*FieldDescriptor + oneOfs []*OneOfDescriptor + extRanges extRanges + sourceInfoPath []int32 + jsonNames jsonNameMap +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapMessage, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (md *MessageDescriptor) Unwrap() protoreflect.Descriptor { + return md.wrapped +} + +// UnwrapMessage returns the underlying protoreflect.MessageDescriptor. +func (md *MessageDescriptor) UnwrapMessage() protoreflect.MessageDescriptor { + return md.wrapped +} + +func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, md protoreflect.MessageDescriptor, mdp *descriptorpb.DescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *MessageDescriptor { + ret := &MessageDescriptor{ + wrapped: md, + proto: mdp, + parent: parent, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } + cache.put(md, ret) + path = append(path, internal.Message_nestedMessagesTag) + for i := 0; i < md.Messages().Len(); i++ { + src := md.Messages().Get(i) + srcProto := mdp.GetNestedType()[src.Index()] + nmd := createMessageDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i))) + symbols[string(src.FullName())] = nmd + ret.nested = append(ret.nested, nmd) + } + path[len(path)-1] = internal.Message_enumsTag + for i := 0; i < md.Enums().Len(); i++ { + src := md.Enums().Get(i) + srcProto := mdp.GetEnumType()[src.Index()] + ed := createEnumDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i))) + symbols[string(src.FullName())] = ed + ret.enums = append(ret.enums, ed) + } + path[len(path)-1] = internal.Message_fieldsTag + for i := 0; i < md.Fields().Len(); i++ { + src := md.Fields().Get(i) + srcProto := mdp.GetField()[src.Index()] + fld := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i))) + symbols[string(src.FullName())] = fld + ret.fields = append(ret.fields, fld) + } + path[len(path)-1] = internal.Message_extensionsTag + for i := 0; i < md.Extensions().Len(); i++ { + src := md.Extensions().Get(i) + srcProto := mdp.GetExtension()[src.Index()] + exd := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i))) + symbols[string(src.FullName())] = exd + ret.extensions = append(ret.extensions, exd) + } + path[len(path)-1] = internal.Message_oneOfsTag + for i := 0; i < md.Oneofs().Len(); i++ { + src := md.Oneofs().Get(i) + srcProto := mdp.GetOneofDecl()[src.Index()] + od := createOneOfDescriptor(fd, ret, i, src, srcProto, append(path, 
int32(i))) + symbols[string(src.FullName())] = od + ret.oneOfs = append(ret.oneOfs, od) + } + for _, r := range mdp.GetExtensionRange() { + // proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code). + // but protoc converts range to exclusive end in descriptor, so we must convert back + end := r.GetEnd() - 1 + ret.extRanges = append(ret.extRanges, proto.ExtensionRange{ + Start: r.GetStart(), + End: end}) + } + sort.Sort(ret.extRanges) + + return ret +} + +func (md *MessageDescriptor) resolve(cache descriptorCache) error { + for _, nmd := range md.nested { + if err := nmd.resolve(cache); err != nil { + return err + } + } + for _, fld := range md.fields { + if err := fld.resolve(cache); err != nil { + return err + } + } + for _, exd := range md.extensions { + if err := exd.resolve(cache); err != nil { + return err + } + } + return nil +} + +// GetName returns the simple (unqualified) name of the message. +func (md *MessageDescriptor) GetName() string { + return string(md.wrapped.Name()) +} + +// GetFullyQualifiedName returns the fully qualified name of the message. This +// includes the package name (if there is one) as well as the names of any +// enclosing messages. +func (md *MessageDescriptor) GetFullyQualifiedName() string { + return string(md.wrapped.FullName()) +} + +// GetParent returns the message's enclosing descriptor. For top-level messages, +// this will be a file descriptor. Otherwise it will be the descriptor for the +// enclosing message. +func (md *MessageDescriptor) GetParent() Descriptor { + return md.parent +} + +// GetFile returns the descriptor for the file in which this message is defined. +func (md *MessageDescriptor) GetFile() *FileDescriptor { + return md.file +} + +// GetOptions returns the message's options. Most usages will be more interested +// in GetMessageOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (md *MessageDescriptor) GetOptions() proto.Message { + return md.proto.GetOptions() +} + +// GetMessageOptions returns the message's options. +func (md *MessageDescriptor) GetMessageOptions() *descriptorpb.MessageOptions { + return md.proto.GetOptions() +} + +// GetSourceInfo returns source info for the message, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// message was defined and also contains comments associated with the message +// definition. +func (md *MessageDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return md.file.sourceInfo.Get(md.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsDescriptorProto, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (md *MessageDescriptor) AsProto() proto.Message { + return md.proto +} + +// AsDescriptorProto returns the underlying descriptor proto. +func (md *MessageDescriptor) AsDescriptorProto() *descriptorpb.DescriptorProto { + return md.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (md *MessageDescriptor) String() string { + return md.proto.String() +} + +// IsMapEntry returns true if this is a synthetic message type that represents an entry +// in a map field. 
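+// For illustration, a hypothetical field `map<string, int32> m = 1;` compiles
+// into a repeated field whose type is a synthetic "MEntry" message with
+// exactly two fields: "key" (tag #1, a string) and "value" (tag #2, an int32).
+// IsMapEntry reports true for that synthetic message only.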
+func (md *MessageDescriptor) IsMapEntry() bool { + return md.wrapped.IsMapEntry() +} + +// GetFields returns all of the fields for this message. +func (md *MessageDescriptor) GetFields() []*FieldDescriptor { + return md.fields +} + +// GetNestedMessageTypes returns all of the message types declared inside this message. +func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor { + return md.nested +} + +// GetNestedEnumTypes returns all of the enums declared inside this message. +func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor { + return md.enums +} + +// GetNestedExtensions returns all of the extensions declared inside this message. +func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor { + return md.extensions +} + +// GetOneOfs returns all of the one-of field sets declared inside this message. +func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor { + return md.oneOfs +} + +// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3". +func (md *MessageDescriptor) IsProto3() bool { + return md.file.IsProto3() +} + +// GetExtensionRanges returns the ranges of extension field numbers for this message. +func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange { + return md.extRanges +} + +// IsExtendable returns true if this message has any extension ranges. +func (md *MessageDescriptor) IsExtendable() bool { + return len(md.extRanges) > 0 +} + +// IsExtension returns true if the given tag number is within any of this message's +// extension ranges. +func (md *MessageDescriptor) IsExtension(tagNumber int32) bool { + return md.extRanges.IsExtension(tagNumber) +} + +type extRanges []proto.ExtensionRange + +func (er extRanges) String() string { + var buf bytes.Buffer + first := true + for _, r := range er { + if first { + first = false + } else { + buf.WriteString(",") + } + fmt.Fprintf(&buf, "%d..%d", r.Start, r.End) + } + return buf.String() +} + +func (er extRanges) IsExtension(tagNumber int32) bool { + i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber }) + return i < len(er) && tagNumber >= er[i].Start +} + +func (er extRanges) Len() int { + return len(er) +} + +func (er extRanges) Less(i, j int) bool { + return er[i].Start < er[j].Start +} + +func (er extRanges) Swap(i, j int) { + er[i], er[j] = er[j], er[i] +} + +// FindFieldByName finds the field with the given name. If no such field exists +// then nil is returned. Only regular fields are returned, not extensions. +func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor { + fqn := fmt.Sprintf("%s.%s", md.GetFullyQualifiedName(), fieldName) + if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() { + return fd + } else { + return nil + } +} + +// FindFieldByNumber finds the field with the given tag number. If no such field +// exists then nil is returned. Only regular fields are returned, not extensions. +func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor { + if fd, ok := md.file.fieldIndex[md.GetFullyQualifiedName()][tagNumber]; ok && !fd.IsExtension() { + return fd + } else { + return nil + } +} + +// FieldDescriptor describes a field of a protocol buffer message. 
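+// It covers both regular fields and extensions: IsExtension distinguishes the
+// two, and for extensions GetOwner returns the message being extended while
+// GetParent returns the scope in which the extension was declared.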
+type FieldDescriptor struct { + wrapped protoreflect.FieldDescriptor + proto *descriptorpb.FieldDescriptorProto + parent Descriptor + owner *MessageDescriptor + file *FileDescriptor + oneOf *OneOfDescriptor + msgType *MessageDescriptor + enumType *EnumDescriptor + sourceInfoPath []int32 + def memoizedDefault +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapField, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (fd *FieldDescriptor) Unwrap() protoreflect.Descriptor { + return fd.wrapped +} + +// UnwrapField returns the underlying protoreflect.FieldDescriptor. +func (fd *FieldDescriptor) UnwrapField() protoreflect.FieldDescriptor { + return fd.wrapped +} + +func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, fld protoreflect.FieldDescriptor, fldp *descriptorpb.FieldDescriptorProto, cache descriptorCache, path []int32) *FieldDescriptor { + ret := &FieldDescriptor{ + wrapped: fld, + proto: fldp, + parent: parent, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } + cache.put(fld, ret) + if !fld.IsExtension() { + ret.owner = parent.(*MessageDescriptor) + } + // owner for extensions, field type (be it message or enum), and one-ofs get resolved later + return ret +} + +func descriptorType(d Descriptor) string { + switch d := d.(type) { + case *FileDescriptor: + return "a file" + case *MessageDescriptor: + return "a message" + case *FieldDescriptor: + if d.IsExtension() { + return "an extension" + } + return "a field" + case *OneOfDescriptor: + return "a oneof" + case *EnumDescriptor: + return "an enum" + case *EnumValueDescriptor: + return "an enum value" + case *ServiceDescriptor: + return "a service" + case *MethodDescriptor: + return "a method" + default: + return fmt.Sprintf("a %T", d) + } +} + +func (fd *FieldDescriptor) resolve(cache descriptorCache) error { + if fd.proto.OneofIndex != nil && fd.oneOf == nil { + return fmt.Errorf("could not link field %s to one-of index %d", fd.GetFullyQualifiedName(), *fd.proto.OneofIndex) + } + if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { + desc, err := resolve(fd.file, fd.wrapped.Enum(), cache) + if err != nil { + return err + } + enumType, ok := desc.(*EnumDescriptor) + if !ok { + return fmt.Errorf("field %v indicates a type of enum, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc)) + } + fd.enumType = enumType + } + if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + desc, err := resolve(fd.file, fd.wrapped.Message(), cache) + if err != nil { + return err + } + msgType, ok := desc.(*MessageDescriptor) + if !ok { + return fmt.Errorf("field %v indicates a type of message, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc)) + } + fd.msgType = msgType + } + if fd.IsExtension() { + desc, err := resolve(fd.file, fd.wrapped.ContainingMessage(), cache) + if err != nil { + return err + } + msgType, ok := desc.(*MessageDescriptor) + if !ok { + return fmt.Errorf("field %v extends %q which should be a message but is %s", fd.GetFullyQualifiedName(), fd.proto.GetExtendee(), descriptorType(desc)) + } + fd.owner = msgType + } + fd.file.registerField(fd) + return nil +} + +func (fd *FieldDescriptor) determineDefault() interface{} { + if fd.IsMap() { + return 
map[interface{}]interface{}(nil) + } else if fd.IsRepeated() { + return []interface{}(nil) + } else if fd.msgType != nil { + return nil + } + + proto3 := fd.file.IsProto3() + if !proto3 { + def := fd.AsFieldDescriptorProto().GetDefaultValue() + if def != "" { + ret := parseDefaultValue(fd, def) + if ret != nil { + return ret + } + // if we can't parse default value, fall-through to return normal default... + } + } + + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + descriptorpb.FieldDescriptorProto_TYPE_UINT32: + return uint32(0) + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32: + return int32(0) + case descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + descriptorpb.FieldDescriptorProto_TYPE_UINT64: + return uint64(0) + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64: + return int64(0) + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + return float32(0.0) + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return float64(0.0) + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + return false + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return []byte(nil) + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return "" + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + if proto3 { + return int32(0) + } + enumVals := fd.GetEnumType().GetValues() + if len(enumVals) > 0 { + return enumVals[0].GetNumber() + } else { + return int32(0) // WTF? + } + default: + panic(fmt.Sprintf("Unknown field type: %v", fd.GetType())) + } +} + +func parseDefaultValue(fd *FieldDescriptor, val string) interface{} { + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + vd := fd.GetEnumType().FindValueByName(val) + if vd != nil { + return vd.GetNumber() + } + return nil + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if val == "true" { + return true + } else if val == "false" { + return false + } + return nil + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return []byte(unescape(val)) + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return val + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if f, err := strconv.ParseFloat(val, 32); err == nil { + return float32(f) + } else { + return float32(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + if f, err := strconv.ParseFloat(val, 64); err == nil { + return f + } else { + return float64(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if i, err := strconv.ParseInt(val, 10, 32); err == nil { + return int32(i) + } else { + return int32(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if i, err := strconv.ParseUint(val, 10, 32); err == nil { + return uint32(i) + } else { + return uint32(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + if i, err := strconv.ParseInt(val, 10, 64); err == nil { + return i + } else { + return int64(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + if i, err := strconv.ParseUint(val, 10, 64); err == nil { + return i + } else { + return uint64(0) + } + default: + return nil + } +} + 
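+// unescape reverses the C-style escaping that protoc applies to the default
+// values of "bytes" fields in descriptors. For illustration, the escaped
+// string `\001\x41\n` unescapes to the three bytes {0x01, 'A', 0x0A}.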
+func unescape(s string) string { + // protoc encodes default values for 'bytes' fields using C escaping, + // so this function reverses that escaping + out := make([]byte, 0, len(s)) + var buf [4]byte + for len(s) > 0 { + if s[0] != '\\' || len(s) < 2 { + // not escape sequence, or too short to be well-formed escape + out = append(out, s[0]) + s = s[1:] + } else if s[1] == 'x' || s[1] == 'X' { + n := matchPrefix(s[2:], 2, isHex) + if n == 0 { + // bad escape + out = append(out, s[:2]...) + s = s[2:] + } else { + c, err := strconv.ParseUint(s[2:2+n], 16, 8) + if err != nil { + // shouldn't really happen... + out = append(out, s[:2+n]...) + } else { + out = append(out, byte(c)) + } + s = s[2+n:] + } + } else if s[1] >= '0' && s[1] <= '7' { + n := 1 + matchPrefix(s[2:], 2, isOctal) + c, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil || c > 0xff { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(c)) + } + s = s[1+n:] + } else if s[1] == 'u' { + if len(s) < 6 { + // bad escape + out = append(out, s...) + s = s[len(s):] + } else { + c, err := strconv.ParseUint(s[2:6], 16, 16) + if err != nil { + // bad escape + out = append(out, s[:6]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) + } + s = s[6:] + } + } else if s[1] == 'U' { + if len(s) < 10 { + // bad escape + out = append(out, s...) + s = s[len(s):] + } else { + c, err := strconv.ParseUint(s[2:10], 16, 32) + if err != nil || c > 0x10ffff { + // bad escape + out = append(out, s[:10]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) + } + s = s[10:] + } + } else { + switch s[1] { + case 'a': + out = append(out, '\a') + case 'b': + out = append(out, '\b') + case 'f': + out = append(out, '\f') + case 'n': + out = append(out, '\n') + case 'r': + out = append(out, '\r') + case 't': + out = append(out, '\t') + case 'v': + out = append(out, '\v') + case '\\': + out = append(out, '\\') + case '\'': + out = append(out, '\'') + case '"': + out = append(out, '"') + case '?': + out = append(out, '?') + default: + // invalid escape, just copy it as-is + out = append(out, s[:2]...) + } + s = s[2:] + } + } + return string(out) +} + +func isOctal(b byte) bool { return b >= '0' && b <= '7' } +func isHex(b byte) bool { + return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F') +} +func matchPrefix(s string, limit int, fn func(byte) bool) int { + l := len(s) + if l > limit { + l = limit + } + i := 0 + for ; i < l; i++ { + if !fn(s[i]) { + return i + } + } + return i +} + +// GetName returns the name of the field. +func (fd *FieldDescriptor) GetName() string { + return string(fd.wrapped.Name()) +} + +// GetNumber returns the tag number of this field. +func (fd *FieldDescriptor) GetNumber() int32 { + return int32(fd.wrapped.Number()) +} + +// GetFullyQualifiedName returns the fully qualified name of the field. Unlike +// GetName, this includes fully qualified name of the enclosing message for +// regular fields. +// +// For extension fields, this includes the package (if there is one) as well as +// any enclosing messages. The package and/or enclosing messages are for where +// the extension is defined, not the message it extends. +// +// If this field is part of a one-of, the fully qualified name does *not* +// include the name of the one-of, only of the enclosing message. +func (fd *FieldDescriptor) GetFullyQualifiedName() string { + return string(fd.wrapped.FullName()) +} + +// GetParent returns the fields's enclosing descriptor. 
+// GetName returns the name of the field.
+func (fd *FieldDescriptor) GetName() string {
+	return string(fd.wrapped.Name())
+}
+
+// GetNumber returns the tag number of this field.
+func (fd *FieldDescriptor) GetNumber() int32 {
+	return int32(fd.wrapped.Number())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
+// GetName, this includes the fully qualified name of the enclosing message for
+// regular fields.
+//
+// For extension fields, this includes the package (if there is one) as well as
+// any enclosing messages. The package and/or enclosing messages are for where
+// the extension is defined, not the message it extends.
+//
+// If this field is part of a one-of, the fully qualified name does *not*
+// include the name of the one-of, only of the enclosing message.
+func (fd *FieldDescriptor) GetFullyQualifiedName() string {
+	return string(fd.wrapped.FullName())
+}
+
+// GetParent returns the field's enclosing descriptor. For normal
+// (non-extension) fields, this is the enclosing message. For extensions, this
+// is the descriptor in which the extension is defined, not the message that is
+// extended. The parent for an extension may be a file descriptor or a message,
+// depending on where the extension is defined.
+func (fd *FieldDescriptor) GetParent() Descriptor {
+	return fd.parent
+}
+
+// GetFile returns the descriptor for the file in which this field is defined.
+func (fd *FieldDescriptor) GetFile() *FileDescriptor {
+	return fd.file
+}
+
+// GetOptions returns the field's options. Most usages will be more interested
+// in GetFieldOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFieldOptions returns the field's options.
+func (fd *FieldDescriptor) GetFieldOptions() *descriptorpb.FieldOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the field, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// field was defined and also contains comments associated with the field
+// definition.
+func (fd *FieldDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+	return fd.file.sourceInfo.Get(fd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFieldDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFieldDescriptorProto returns the underlying descriptor proto.
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *descriptorpb.FieldDescriptorProto {
+	return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FieldDescriptor) String() string {
+	return fd.proto.String()
+}
+
+// GetJSONName returns the name of the field as referenced in the message's JSON
+// format.
+func (fd *FieldDescriptor) GetJSONName() string {
+	if jsonName := fd.proto.JsonName; jsonName != nil {
+		// if json name is present, use its value
+		return *jsonName
+	}
+	// otherwise, compute the proper JSON name from the field name
+	return jsonCamelCase(fd.proto.GetName())
+}
+
+func jsonCamelCase(s string) string {
+	// This mirrors the implementation in the protoc/C++ runtime and in the Java runtime:
+	//   https://github.com/protocolbuffers/protobuf/blob/a104dffcb6b1958a424f5fa6f9e6bdc0ab9b6f9e/src/google/protobuf/descriptor.cc#L276
+	//   https://github.com/protocolbuffers/protobuf/blob/a1c886834425abb64a966231dd2c9dd84fb289b3/java/core/src/main/java/com/google/protobuf/Descriptors.java#L1286
+	var buf bytes.Buffer
+	prevWasUnderscore := false
+	for _, r := range s {
+		if r == '_' {
+			prevWasUnderscore = true
+			continue
+		}
+		if prevWasUnderscore {
+			r = unicode.ToUpper(r)
+			prevWasUnderscore = false
+		}
+		buf.WriteRune(r)
+	}
+	return buf.String()
+}
+
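+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). jsonCamelCase converts field names the way the
+// JSON mapping requires, e.g. "foo_bar" -> "fooBar"; GetJSONName above
+// prefers an explicit json_name from the descriptor when one is present.
+func exampleJSONNames() []string {
+	return []string{
+		jsonCamelCase("foo_bar"),   // "fooBar"
+		jsonCamelCase("foo_bar_2"), // "fooBar2"
+	}
+}
+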
+// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName),
+// but includes the fully qualified name of the enclosing message.
+//
+// If the field is an extension, it will return the package name (if there is
+// one) as well as the names of any enclosing messages. The package and/or
+// enclosing messages are for where the extension is defined, not the message it
+// extends.
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+	parent := fd.GetParent()
+	switch parent := parent.(type) {
+	case *FileDescriptor:
+		pkg := parent.GetPackage()
+		if pkg == "" {
+			return fd.GetJSONName()
+		}
+		return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+	default:
+		return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+	}
+}
+
+// GetOwner returns the message type that this field belongs to. If this is a normal
+// field then this is the same as GetParent. But for extensions, this will be the
+// extendee message whereas GetParent refers to where the extension was declared.
+func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
+	return fd.owner
+}
+
+// IsExtension returns true if this is an extension field.
+func (fd *FieldDescriptor) IsExtension() bool {
+	return fd.wrapped.IsExtension()
+}
+
+// GetOneOf returns the one-of field set to which this field belongs. If this field
+// is not part of a one-of then this method returns nil.
+func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
+	return fd.oneOf
+}
+
+// GetType returns the type of this field. If the type indicates an enum, the
+// enum type can be queried via GetEnumType. If the type indicates a message, the
+// message type can be queried via GetMessageType.
+func (fd *FieldDescriptor) GetType() descriptorpb.FieldDescriptorProto_Type {
+	return fd.proto.GetType()
+}
+
+// GetLabel returns the label for this field. The label can be required (proto2-only),
+// optional (default for proto3), or repeated.
+func (fd *FieldDescriptor) GetLabel() descriptorpb.FieldDescriptorProto_Label {
+	return fd.proto.GetLabel()
+}
+
+// IsRequired returns true if this field has the "required" label.
+func (fd *FieldDescriptor) IsRequired() bool {
+	return fd.wrapped.Cardinality() == protoreflect.Required
+}
+
+// IsRepeated returns true if this field has the "repeated" label.
+func (fd *FieldDescriptor) IsRepeated() bool {
+	return fd.wrapped.Cardinality() == protoreflect.Repeated
+}
+
+// IsProto3Optional returns true if this field has an explicit "optional" label
+// and is in a "proto3" syntax file. Such fields, if they are normal fields (not
+// extensions), will be nested in synthetic oneofs that contain only the single
+// field.
+func (fd *FieldDescriptor) IsProto3Optional() bool {
+	return fd.proto.GetProto3Optional()
+}
+
+// HasPresence returns true if this field can distinguish when a value is
+// present or not. Scalar fields in "proto3" syntax files, for example, return
+// false since absent values are indistinguishable from zero values.
+func (fd *FieldDescriptor) HasPresence() bool {
+	return fd.wrapped.HasPresence()
+}
+
+// IsMap returns true if this is a map field. If so, it will have the "repeated"
+// label and its type will be a message that represents a map entry. The map entry
+// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
+func (fd *FieldDescriptor) IsMap() bool {
+	return fd.wrapped.IsMap()
+}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+	if fd.IsMap() {
+		return fd.msgType.FindFieldByNumber(int32(1))
+	}
+	return nil
+}
+
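+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). For a field declared `map<string, int64>
+// counts = 1;`, IsMap reports true and the key/value accessors surface the
+// synthetic map-entry fields (tag #1 and tag #2 of the entry message).
+// GetMapValueType is defined just below.
+func exampleMapEntryTypes(fd *FieldDescriptor) (key, value *FieldDescriptor) {
+	if !fd.IsMap() {
+		return nil, nil // not a map field
+	}
+	return fd.GetMapKeyType(), fd.GetMapValueType()
+}
+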
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+	if fd.IsMap() {
+		return fd.msgType.FindFieldByNumber(int32(2))
+	}
+	return nil
+}
+
+// GetMessageType returns the type of this field if it is a message type. If
+// this field is not a message type, it returns nil.
+func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
+	return fd.msgType
+}
+
+// GetEnumType returns the type of this field if it is an enum type. If this
+// field is not an enum type, it returns nil.
+func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
+	return fd.enumType
+}
+
+// GetDefaultValue returns the default value for this field.
+//
+// If this field represents a message type, this method always returns nil (even though
+// for proto2 files, the default value should be a default instance of the message type).
+// If the field represents an enum type, this method returns an int32 corresponding to the
+// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
+// this field is repeated (and not a map), it returns a nil []interface{}.
+//
+// Otherwise, it returns the declared default value for the field or a zero value, if no
+// default is declared or if the file is proto3. The type of said return value corresponds
+// to the type of the field:
+//
+//	+-------------------------+-----------+
+//	| Declared Type           | Go Type   |
+//	+-------------------------+-----------+
+//	| int32, sint32, sfixed32 | int32     |
+//	| int64, sint64, sfixed64 | int64     |
+//	| uint32, fixed32         | uint32    |
+//	| uint64, fixed64         | uint64    |
+//	| float                   | float32   |
+//	| double                  | float64   |
+//	| bool                    | bool      |
+//	| string                  | string    |
+//	| bytes                   | []byte    |
+//	+-------------------------+-----------+
+func (fd *FieldDescriptor) GetDefaultValue() interface{} {
+	return fd.getDefaultValue()
+}
+
+// EnumDescriptor describes an enum declared in a proto file.
+type EnumDescriptor struct {
+	wrapped        protoreflect.EnumDescriptor
+	proto          *descriptorpb.EnumDescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	values         []*EnumValueDescriptor
+	valuesByNum    sortedValues
+	sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnum, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (ed *EnumDescriptor) Unwrap() protoreflect.Descriptor {
+	return ed.wrapped
+}
+
+// UnwrapEnum returns the underlying protoreflect.EnumDescriptor.
+func (ed *EnumDescriptor) UnwrapEnum() protoreflect.EnumDescriptor {
+	return ed.wrapped
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, ed protoreflect.EnumDescriptor, edp *descriptorpb.EnumDescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *EnumDescriptor {
+	ret := &EnumDescriptor{
+		wrapped:        ed,
+		proto:          edp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	path = append(path, internal.Enum_valuesTag)
+	for i := 0; i < ed.Values().Len(); i++ {
+		src := ed.Values().Get(i)
+		srcProto := edp.GetValue()[src.Index()]
+		evd := createEnumValueDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = evd
+		// NB: for backwards compatibility, also register the enum value as if
+		// scoped within the enum (counter-intuitively, enum value full names are
+		// scoped in the enum's parent element). EnumValueDescriptor.GetFullyQualifiedName
+		// returns that alternate full name.
+		symbols[evd.GetFullyQualifiedName()] = evd
+		ret.values = append(ret.values, evd)
+	}
+	if len(ret.values) > 0 {
+		ret.valuesByNum = make(sortedValues, len(ret.values))
+		copy(ret.valuesByNum, ret.values)
+		sort.Stable(ret.valuesByNum)
+	}
+	return ret
+}
+
+type sortedValues []*EnumValueDescriptor
+
+func (sv sortedValues) Len() int {
+	return len(sv)
+}
+
+func (sv sortedValues) Less(i, j int) bool {
+	return sv[i].GetNumber() < sv[j].GetNumber()
+}
+
+func (sv sortedValues) Swap(i, j int) {
+	sv[i], sv[j] = sv[j], sv[i]
+}
+
+// GetName returns the simple (unqualified) name of the enum type.
+func (ed *EnumDescriptor) GetName() string {
+	return string(ed.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum type.
+// This includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (ed *EnumDescriptor) GetFullyQualifiedName() string {
+	return string(ed.wrapped.FullName())
+}
+
+// GetParent returns the enum type's enclosing descriptor. For top-level enums,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (ed *EnumDescriptor) GetParent() Descriptor {
+	return ed.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum is defined.
+func (ed *EnumDescriptor) GetFile() *FileDescriptor {
+	return ed.file
+}
+
+// GetOptions returns the enum type's options. Most usages will be more
+// interested in GetEnumOptions, which has a concrete return type. This generic
+// version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) GetOptions() proto.Message {
+	return ed.proto.GetOptions()
+}
+
+// GetEnumOptions returns the enum type's options.
+func (ed *EnumDescriptor) GetEnumOptions() *descriptorpb.EnumOptions {
+	return ed.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum type, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum type was defined and also contains comments associated with the enum
+// definition.
+func (ed *EnumDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+	return ed.file.sourceInfo.Get(ed.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) AsProto() proto.Message {
+	return ed.proto
+}
+
+// AsEnumDescriptorProto returns the underlying descriptor proto.
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *descriptorpb.EnumDescriptorProto {
+	return ed.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (ed *EnumDescriptor) String() string {
+	return ed.proto.String()
+}
+
+// GetValues returns all of the allowed values defined for this enum.
+func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor {
+	return ed.values
+}
+
+// FindValueByName finds the enum value with the given name. If no such value exists
+// then nil is returned.
+func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
+	fqn := fmt.Sprintf("%s.%s", ed.GetFullyQualifiedName(), name)
+	if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
+		return vd
+	} else {
+		return nil
+	}
+}
+
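+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). Enum values can be resolved either by declared
+// name or by numeric value, using the lookups defined on EnumDescriptor;
+// FindValueByNumber is defined just below.
+func exampleLookupEnumValue(ed *EnumDescriptor, name string, num int32) *EnumValueDescriptor {
+	if vd := ed.FindValueByName(name); vd != nil {
+		return vd
+	}
+	// fall back to numeric lookup
+	return ed.FindValueByNumber(num)
+}
+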
+// FindValueByNumber finds the value with the given numeric value. If no such value
+// exists then nil is returned. If aliases are allowed and multiple values have the
+// given number, the first declared value is returned.
+func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor {
+	index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num })
+	if index < len(ed.valuesByNum) {
+		vd := ed.valuesByNum[index]
+		if vd.GetNumber() == num {
+			return vd
+		}
+	}
+	return nil
+}
+
+// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
+type EnumValueDescriptor struct {
+	wrapped        protoreflect.EnumValueDescriptor
+	proto          *descriptorpb.EnumValueDescriptorProto
+	parent         *EnumDescriptor
+	file           *FileDescriptor
+	sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnumValue, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (vd *EnumValueDescriptor) Unwrap() protoreflect.Descriptor {
+	return vd.wrapped
+}
+
+// UnwrapEnumValue returns the underlying protoreflect.EnumValueDescriptor.
+func (vd *EnumValueDescriptor) UnwrapEnumValue() protoreflect.EnumValueDescriptor {
+	return vd.wrapped
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, evd protoreflect.EnumValueDescriptor, evdp *descriptorpb.EnumValueDescriptorProto, path []int32) *EnumValueDescriptor {
+	return &EnumValueDescriptor{
+		wrapped:        evd,
+		proto:          evdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+}
+
+func (vd *EnumValueDescriptor) resolve(path []int32) {
+	vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the enum value.
+func (vd *EnumValueDescriptor) GetName() string {
+	return string(vd.wrapped.Name())
+}
+
+// GetNumber returns the numeric value associated with this enum value.
+func (vd *EnumValueDescriptor) GetNumber() int32 {
+	return int32(vd.wrapped.Number())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum value.
+// Unlike GetName, this includes the fully qualified name of the enclosing enum.
+func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
+	// NB: Technically, we do not return the correct value. Enum values are
+	// scoped within the enclosing element, not within the enum itself (which
+	// is very non-intuitive, but it follows C++ scoping rules). The value
+	// returned from vd.wrapped.FullName() is correct. However, we return
+	// something different, just for backwards compatibility, as this package
+	// has always instead returned the name scoped inside the enum.
+	return fmt.Sprintf("%s.%s", vd.parent.GetFullyQualifiedName(), vd.wrapped.Name())
+}
+
+// GetParent returns the descriptor for the enum in which this enum value is
+// defined. Most usages will prefer to use GetEnum, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetParent() Descriptor {
+	return vd.parent
+}
+
+// GetEnum returns the enum in which this enum value is defined.
+func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor {
+	return vd.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum value is
+// defined.
+func (vd *EnumValueDescriptor) GetFile() *FileDescriptor {
+	return vd.file
+}
+
+// GetOptions returns the enum value's options.
Most usages will be more +// interested in GetEnumValueOptions, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (vd *EnumValueDescriptor) GetOptions() proto.Message { + return vd.proto.GetOptions() +} + +// GetEnumValueOptions returns the enum value's options. +func (vd *EnumValueDescriptor) GetEnumValueOptions() *descriptorpb.EnumValueOptions { + return vd.proto.GetOptions() +} + +// GetSourceInfo returns source info for the enum value, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// enum value was defined and also contains comments associated with the enum +// value definition. +func (vd *EnumValueDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return vd.file.sourceInfo.Get(vd.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsEnumValueDescriptorProto, which has a concrete return type. +// This generic version is present to satisfy the Descriptor interface. +func (vd *EnumValueDescriptor) AsProto() proto.Message { + return vd.proto +} + +// AsEnumValueDescriptorProto returns the underlying descriptor proto. +func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto { + return vd.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (vd *EnumValueDescriptor) String() string { + return vd.proto.String() +} + +// ServiceDescriptor describes an RPC service declared in a proto file. +type ServiceDescriptor struct { + wrapped protoreflect.ServiceDescriptor + proto *descriptorpb.ServiceDescriptorProto + file *FileDescriptor + methods []*MethodDescriptor + sourceInfoPath []int32 +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapService, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (sd *ServiceDescriptor) Unwrap() protoreflect.Descriptor { + return sd.wrapped +} + +// UnwrapService returns the underlying protoreflect.ServiceDescriptor. +func (sd *ServiceDescriptor) UnwrapService() protoreflect.ServiceDescriptor { + return sd.wrapped +} + +func createServiceDescriptor(fd *FileDescriptor, sd protoreflect.ServiceDescriptor, sdp *descriptorpb.ServiceDescriptorProto, symbols map[string]Descriptor, path []int32) *ServiceDescriptor { + ret := &ServiceDescriptor{ + wrapped: sd, + proto: sdp, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } + path = append(path, internal.Service_methodsTag) + for i := 0; i < sd.Methods().Len(); i++ { + src := sd.Methods().Get(i) + srcProto := sdp.GetMethod()[src.Index()] + md := createMethodDescriptor(fd, ret, src, srcProto, append(path, int32(i))) + symbols[string(src.FullName())] = md + ret.methods = append(ret.methods, md) + } + return ret +} + +func (sd *ServiceDescriptor) resolve(cache descriptorCache) error { + for _, md := range sd.methods { + if err := md.resolve(cache); err != nil { + return err + } + } + return nil +} + +// GetName returns the simple (unqualified) name of the service. +func (sd *ServiceDescriptor) GetName() string { + return string(sd.wrapped.Name()) +} + +// GetFullyQualifiedName returns the fully qualified name of the service. This +// includes the package name (if there is one). 
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string { + return string(sd.wrapped.FullName()) +} + +// GetParent returns the descriptor for the file in which this service is +// defined. Most usages will prefer to use GetFile, which has a concrete return +// type. This more generic method is present to satisfy the Descriptor interface. +func (sd *ServiceDescriptor) GetParent() Descriptor { + return sd.file +} + +// GetFile returns the descriptor for the file in which this service is defined. +func (sd *ServiceDescriptor) GetFile() *FileDescriptor { + return sd.file +} + +// GetOptions returns the service's options. Most usages will be more interested +// in GetServiceOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (sd *ServiceDescriptor) GetOptions() proto.Message { + return sd.proto.GetOptions() +} + +// GetServiceOptions returns the service's options. +func (sd *ServiceDescriptor) GetServiceOptions() *descriptorpb.ServiceOptions { + return sd.proto.GetOptions() +} + +// GetSourceInfo returns source info for the service, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// service was defined and also contains comments associated with the service +// definition. +func (sd *ServiceDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return sd.file.sourceInfo.Get(sd.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsServiceDescriptorProto, which has a concrete return type. +// This generic version is present to satisfy the Descriptor interface. +func (sd *ServiceDescriptor) AsProto() proto.Message { + return sd.proto +} + +// AsServiceDescriptorProto returns the underlying descriptor proto. +func (sd *ServiceDescriptor) AsServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto { + return sd.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (sd *ServiceDescriptor) String() string { + return sd.proto.String() +} + +// GetMethods returns all of the RPC methods for this service. +func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor { + return sd.methods +} + +// FindMethodByName finds the method with the given name. If no such method exists +// then nil is returned. +func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor { + fqn := fmt.Sprintf("%s.%s", sd.GetFullyQualifiedName(), name) + if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok { + return md + } else { + return nil + } +} + +// MethodDescriptor describes an RPC method declared in a proto file. +type MethodDescriptor struct { + wrapped protoreflect.MethodDescriptor + proto *descriptorpb.MethodDescriptorProto + parent *ServiceDescriptor + file *FileDescriptor + inType *MessageDescriptor + outType *MessageDescriptor + sourceInfoPath []int32 +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapMethod, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (md *MethodDescriptor) Unwrap() protoreflect.Descriptor { + return md.wrapped +} + +// UnwrapMethod returns the underlying protoreflect.MethodDescriptor. 
+func (md *MethodDescriptor) UnwrapMethod() protoreflect.MethodDescriptor {
+	return md.wrapped
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, md protoreflect.MethodDescriptor, mdp *descriptorpb.MethodDescriptorProto, path []int32) *MethodDescriptor {
+	// request and response types get resolved later
+	return &MethodDescriptor{
+		wrapped:        md,
+		proto:          mdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+}
+
+func (md *MethodDescriptor) resolve(cache descriptorCache) error {
+	if desc, err := resolve(md.file, md.wrapped.Input(), cache); err != nil {
+		return err
+	} else {
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("method %v has request type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetInputType(), descriptorType(desc))
+		}
+		md.inType = msgType
+	}
+	if desc, err := resolve(md.file, md.wrapped.Output(), cache); err != nil {
+		return err
+	} else {
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("method %v has response type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetOutputType(), descriptorType(desc))
+		}
+		md.outType = msgType
+	}
+	return nil
+}
+
+// GetName returns the name of the method.
+func (md *MethodDescriptor) GetName() string {
+	return string(md.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
+// GetName, this includes the fully qualified name of the enclosing service.
+func (md *MethodDescriptor) GetFullyQualifiedName() string {
+	return string(md.wrapped.FullName())
+}
+
+// GetParent returns the descriptor for the service in which this method is
+// defined. Most usages will prefer to use GetService, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (md *MethodDescriptor) GetParent() Descriptor {
+	return md.parent
+}
+
+// GetService returns the RPC service in which this method is declared.
+func (md *MethodDescriptor) GetService() *ServiceDescriptor {
+	return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this method is defined.
+func (md *MethodDescriptor) GetFile() *FileDescriptor {
+	return md.file
+}
+
+// GetOptions returns the method's options. Most usages will be more interested
+// in GetMethodOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) GetOptions() proto.Message {
+	return md.proto.GetOptions()
+}
+
+// GetMethodOptions returns the method's options.
+func (md *MethodDescriptor) GetMethodOptions() *descriptorpb.MethodOptions {
+	return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the method, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// method was defined and also contains comments associated with the method
+// definition.
+func (md *MethodDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+	return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
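+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). A resolved MethodDescriptor exposes its
+// streaming flags and its fully resolved request/response message
+// descriptors; the accessors used here are defined just below.
+func exampleMethodShape(md *MethodDescriptor) string {
+	kind := "unary"
+	if md.IsClientStreaming() && md.IsServerStreaming() {
+		kind = "bidi-streaming"
+	} else if md.IsClientStreaming() || md.IsServerStreaming() {
+		kind = "streaming"
+	}
+	return fmt.Sprintf("%s %s(%s) returns (%s)", kind, md.GetName(),
+		md.GetInputType().GetFullyQualifiedName(), md.GetOutputType().GetFullyQualifiedName())
+}
+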
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsMethodDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *descriptorpb.MethodDescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+	return md.wrapped.IsStreamingServer()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+	return md.wrapped.IsStreamingClient()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+	return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+	return md.outType
+}
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+	wrapped        protoreflect.OneofDescriptor
+	proto          *descriptorpb.OneofDescriptorProto
+	parent         *MessageDescriptor
+	file           *FileDescriptor
+	choices        []*FieldDescriptor
+	sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapOneOf, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (od *OneOfDescriptor) Unwrap() protoreflect.Descriptor {
+	return od.wrapped
+}
+
+// UnwrapOneOf returns the underlying protoreflect.OneofDescriptor.
+func (od *OneOfDescriptor) UnwrapOneOf() protoreflect.OneofDescriptor {
+	return od.wrapped
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, od protoreflect.OneofDescriptor, odp *descriptorpb.OneofDescriptorProto, path []int32) *OneOfDescriptor {
+	ret := &OneOfDescriptor{
+		wrapped:        od,
+		proto:          odp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	for _, f := range parent.fields {
+		oi := f.proto.OneofIndex
+		if oi != nil && *oi == int32(index) {
+			f.oneOf = ret
+			ret.choices = append(ret.choices, f)
+		}
+	}
+	return ret
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+	return string(od.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes the fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+	return string(od.wrapped.FullName())
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+	return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+	return od.parent
+}
+
+// GetFile returns the descriptor for the file in which this one-of is defined.
+func (od *OneOfDescriptor) GetFile() *FileDescriptor {
+	return od.file
+}
+
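+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). GetChoices, defined below, lists the member
+// fields of a one-of, at most one of which may be set on a given message;
+// synthetic one-ofs (see IsSynthetic below) wrap single proto3 optional
+// fields.
+func exampleChosenField(od *OneOfDescriptor, name string) *FieldDescriptor {
+	for _, fd := range od.GetChoices() {
+		if fd.GetName() == name {
+			return fd
+		}
+	}
+	return nil
+}
+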
+// GetOptions returns the one-of's options. Most usages will be more interested
+// in GetOneOfOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) GetOptions() proto.Message {
+	return od.proto.GetOptions()
+}
+
+// GetOneOfOptions returns the one-of's options.
+func (od *OneOfDescriptor) GetOneOfOptions() *descriptorpb.OneofOptions {
+	return od.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the one-of, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// one-of was defined and also contains comments associated with the one-of
+// definition.
+func (od *OneOfDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+	return od.file.sourceInfo.Get(od.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsOneofDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) AsProto() proto.Message {
+	return od.proto
+}
+
+// AsOneofDescriptorProto returns the underlying descriptor proto.
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *descriptorpb.OneofDescriptorProto {
+	return od.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (od *OneOfDescriptor) String() string {
+	return od.proto.String()
+}
+
+// GetChoices returns the fields that are part of the one-of field set. At most one of
+// these fields may be set for a given message.
+func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor {
+	return od.choices
+}
+
+// IsSynthetic returns true if this is a synthetic one-of that wraps a single
+// proto3 optional field.
+func (od *OneOfDescriptor) IsSynthetic() bool {
+	return od.wrapped.IsSynthetic()
+}
+
+func resolve(fd *FileDescriptor, src protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+	d := cache.get(src)
+	if d != nil {
+		return d, nil
+	}
+
+	fqn := string(src.FullName())
+
+	d = fd.FindSymbol(fqn)
+	if d != nil {
+		return d, nil
+	}
+
+	for _, dep := range fd.deps {
+		d := dep.FindSymbol(fqn)
+		if d != nil {
+			return d, nil
+		}
+	}
+
+	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), fqn)
+}
+
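+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). Resolution of a fully qualified name consults
+// the file's own symbols and then its transitive dependencies, as resolve
+// above does; FindSymbol is the public entry point for that search.
+func exampleFindMessage(fd *FileDescriptor, fqn string) *MessageDescriptor {
+	if md, ok := fd.FindSymbol(fqn).(*MessageDescriptor); ok {
+		return md
+	}
+	return nil
+}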
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
new file mode 100644
index 00000000..25d619a2
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
@@ -0,0 +1,30 @@
+//go:build appengine || gopherjs || purego
+// +build appengine gopherjs purego
+
+// NB: other environments where unsafe is inappropriate should use the "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+type jsonNameMap struct{}
+type memoizedDefault struct{}
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: when use of unsafe is allowed, we use it to atomically store an index
+	// via atomic.LoadPointer/atomic.StorePointer. Without it, we skip the index
+	// and must do a linear scan of the fields each time.
+	for _, f := range md.fields {
+		jn := f.GetJSONName()
+		if jn == jsonName {
+			return f
+		}
+	}
+	return nil
+}
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	return fd.determineDefault()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
new file mode 100644
index 00000000..691f0d88
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
@@ -0,0 +1,59 @@
+//go:build !appengine && !gopherjs && !purego
+// +build !appengine,!gopherjs,!purego
+
+// NB: other environments where unsafe is inappropriate should use the "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe
+type memoizedDefault *interface{}            // loaded/stored atomically via atomic+unsafe
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: We don't want to eagerly index JSON names because many programs won't use it.
+	// So we want to do it lazily, but also make sure the result is thread-safe. So we
+	// atomically load/store the map as if it were a normal pointer. We don't use other
+	// mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to
+	// do this lazily because those types cannot be copied, and we'd rather not induce
+	// 'go vet' errors in programs that use descriptors and try to copy them.
+	// If multiple goroutines try to access the index at the same time, before it is
+	// built, they will all end up computing the index redundantly. Future reads of
+	// the index will use whatever was the "last one stored" by those racing goroutines.
+	// Since building the index is deterministic, this is fine: all indices computed
+	// will be the same.
+	addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames))
+	jsonNames := atomic.LoadPointer(addrOfJsonNames)
+	var index map[string]*FieldDescriptor
+	if jsonNames == nil {
+		// slow path: compute the index
+		index = map[string]*FieldDescriptor{}
+		for _, f := range md.fields {
+			jn := f.GetJSONName()
+			index[jn] = f
+		}
+		atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index)))
+	} else {
+		*(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames
+	}
+	return index[jsonName]
+}
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def))
+	def := atomic.LoadPointer(addrOfDef)
+	if def != nil {
+		return *(*interface{})(def)
+	}
+	// slow path: compute the default, potentially involves decoding value
+	d := fd.determineDefault()
+	atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d)))
+	return d
+}
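+
+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). The purego and unsafe variants of
+// FindFieldByJSONName behave identically; only the caching strategy differs.
+func exampleFieldForJSONName(md *MessageDescriptor, jsonName string) *FieldDescriptor {
+	return md.FindFieldByJSONName(jsonName) // e.g. "fooBar" for a field named "foo_bar"
+}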
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
new file mode 100644
index 00000000..dfac5c72
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -0,0 +1,62 @@
+// Package desc contains "rich descriptors" for protocol buffers. The built-in
+// descriptor types are simple protobuf messages, each one representing a
+// different kind of element in the AST of a .proto source file.
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through rich descriptor's
+// methods.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+//
+// # Loading Descriptors
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
+//
+// If you are using the protoc-gen-gosrcinfo plugin (also in this repo), then
+// the descriptors returned from these Load* functions will include source code
+// information, and thus include comments for elements.
+//
+// # Creating Descriptors
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients: where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// You cannot create a message descriptor without also creating its enclosing
+// file, because the enclosing file is what contains other relevant information
+// like other symbols and dependencies/imports, which is how type references
+// are resolved (such as when a field in a message has a type that is another
+// message or enum).
+//
+// So the functions in this package for creating descriptors are all for
+// creating *file* descriptors. See the various Create* functions for more
+// information.
+//
+// Also see the desc/builder sub-package, for another API that makes it easier
+// to synthesize descriptors programmatically.
+package desc
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
new file mode 100644
index 00000000..8e6a0d6e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -0,0 +1,360 @@
+package desc
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+	globalImportPathConf map[string]string
+	globalImportPathMu   sync.RWMutex
+)
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path. For more details on why alternate import paths may need to
+// be configured, see ImportResolver.
+//
+// This method panics if provided invalid input. An empty importPath is invalid.
+// An un-registered registerPath is also invalid. For example, if an attempt is
+// made to register the import path "foo/bar.proto" as "bar.proto", but there is
+// no "bar.proto" registered in the Go protobuf runtime, this method will panic.
+// This method also panics if an attempt is made to register the same import
+// path more than once.
+//
+// This function works globally, applying to all descriptors loaded by this
+// package. If you instead want more granular support for handling alternate
+// import paths -- such as for a single invocation of a function in this
+// package or when the alternate path is only used from one file (so you don't
+// want the alternate path used when loading every other file), use an
+// ImportResolver instead.
+//
+// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// registering alternate paths is no longer useful or necessary.
+func RegisterImportPath(registerPath, importPath string) {
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	_, err := protoregistry.GlobalFiles.FindFileByPath(registerPath)
+	if err != nil {
+		panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
+	}
+	globalImportPathMu.Lock()
+	defer globalImportPathMu.Unlock()
+	if reg := globalImportPathConf[importPath]; reg != "" {
+		panic(fmt.Sprintf("import path %q already registered for %s", importPath, reg))
+	}
+	if globalImportPathConf == nil {
+		globalImportPathConf = map[string]string{}
+	}
+	globalImportPathConf[importPath] = registerPath
+}
+
+// ResolveImport resolves the given import path. If it has been registered as an
+// alternate via RegisterImportPath, the registered path is returned. Otherwise,
+// the given import path is returned unchanged.
+//
+// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// registering alternate paths is no longer useful or necessary.
+func ResolveImport(importPath string) string {
+	importPath = clean(importPath)
+	globalImportPathMu.RLock()
+	defer globalImportPathMu.RUnlock()
+	reg := globalImportPathConf[importPath]
+	if reg == "" {
+		return importPath
+	}
+	return reg
+}
+
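+// NB: illustrative sketch only (a hypothetical helper, not referenced
+// elsewhere in this package). If generated code registered a file as
+// "bar.proto" but other files import it as "foo/bar.proto", a one-time global
+// registration redirects resolution. Note that RegisterImportPath panics
+// unless "bar.proto" is a file known to the Go protobuf runtime.
+func exampleRegisterAltPath() string {
+	RegisterImportPath("bar.proto", "foo/bar.proto")
+	return ResolveImport("foo/bar.proto") // yields "bar.proto"
+}
+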
+// ImportResolver lets you work around linking issues that are caused by
+// mismatches between how a particular proto source file is registered in the Go
+// protobuf runtime and how that same file is imported by other files. The file
+// is registered using the same relative path given to protoc when the file is
+// compiled (i.e. when Go code is generated). So if any file tries to import
+// that source file, but using a different relative path, then a link error will
+// occur when this package tries to load a descriptor for the importing file.
+//
+// For example, let's say we have two proto source files: "foo/bar.proto" and
+// "fubar/baz.proto". The latter imports the former using a line like so:
+//
+//	import "foo/bar.proto";
+//
+// However, when protoc is invoked, the command-line args look like so:
+//
+//	protoc -Ifoo/ --go_out=foo/ bar.proto
+//	protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
+// Because the path given to protoc is just "bar.proto" and "baz.proto", this is
+// how they are registered in the Go protobuf runtime. So, when loading the
+// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
+// but will find no file registered with that path:
+//
+//	fd, err := desc.LoadFileDescriptor("baz.proto")
+//	// err will be non-nil, complaining that there is no such file
+//	// found named "foo/bar.proto"
+//
+// This can be remedied by registering alternate import paths using an
+// ImportResolver. Continuing with the example above, the code below would fix
+// any link issue:
+//
+//	var r desc.ImportResolver
+//	r.RegisterImportPath("bar.proto", "foo/bar.proto")
+//	fd, err := r.LoadFileDescriptor("baz.proto")
+//	// err will be nil; descriptor successfully loaded!
+//
+// If there are files that are *always* imported using a different relative
+// path than how they are registered, consider using the global
+// RegisterImportPath function, so you don't have to use an ImportResolver for
+// every file that imports it.
+//
+// Note that the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// customizing import paths for descriptor resolution is no longer necessary.
+type ImportResolver struct {
+	children    map[string]*ImportResolver
+	importPaths map[string]string
+
+	// By default, an ImportResolver will fall back to consulting any paths
+	// registered via the top-level RegisterImportPath function. Setting this
+	// field to true will cause the ImportResolver to skip that fallback and
+	// only examine its own locally registered paths.
+	SkipFallbackRules bool
+}
+
+// ResolveImport resolves the given import path in the context of the given
+// source file. If a matching alternate has been registered with this resolver
+// via a call to RegisterImportPath or RegisterImportPathFrom, then the
+// registered path is returned. Otherwise, the given import path is returned
+// unchanged.
+func (r *ImportResolver) ResolveImport(source, importPath string) string {
+	if r != nil {
+		res := r.resolveImport(clean(source), clean(importPath))
+		if res != "" {
+			return res
+		}
+		if r.SkipFallbackRules {
+			return importPath
+		}
+	}
+	return ResolveImport(importPath)
+}
+
+func (r *ImportResolver) resolveImport(source, importPath string) string {
+	if source == "" {
+		return r.importPaths[importPath]
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, '/')
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch != nil {
+		if reg := ch.resolveImport(cdr, importPath); reg != "" {
+			return reg
+		}
+	}
+	return r.importPaths[importPath]
+}
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path with this resolver.
+// Any appearance of the given import path when linking files will instead try
+// to link the given registered path. If the registered path cannot be located,
+// then linking will fall back to the actual imported path.
+//
+// This method will panic if given an empty path or if the same import path is
+// registered more than once.
+//
+// To constrain the contexts where the given import path is to be re-written,
+// use RegisterImportPathFrom instead.
+func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) {
+	r.RegisterImportPathFrom(registerPath, importPath, "")
+}
+
+// RegisterImportPathFrom registers an alternate import path for a given
+// registered proto file path with this resolver, but only for imports in the
+// specified source context.
+//
+// The source context can be the name of a folder or a proto source file. Any
+// appearance of the given import path in that context will instead try to link
+// the given registered path. To be in context, the file that is being linked
+// (i.e. the one whose import statement is being resolved) must have the same
+// relative path as the source context or be a sub-path of it (i.e. a descendant
+// of the source folder).
+//
+// If the registered path cannot be located, then linking will fall back to the
+// actual imported path.
+//
+// This method will panic if given an empty path. The source context, on the
+// other hand, is allowed to be blank. A blank source matches all files. This
+// method also panics if the same import path is registered in the same source
+// context more than once.
+func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) {
+	importPath = clean(importPath)
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	registerPath = clean(registerPath)
+	if len(registerPath) == 0 {
+		panic("registered path cannot be empty")
+	}
+	r.registerImportPathFrom(registerPath, importPath, clean(source))
+}
+
+func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) {
+	if source == "" {
+		if r.importPaths == nil {
+			r.importPaths = map[string]string{}
+		} else if reg := r.importPaths[importPath]; reg != "" {
+			panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath))
+		}
+		r.importPaths[importPath] = registerPath
+		return
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, '/')
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch == nil {
+		if r.children == nil {
+			r.children = map[string]*ImportResolver{}
+		}
+		ch = &ImportResolver{}
+		r.children[car] = ch
+	}
+	ch.registerImportPathFrom(registerPath, importPath, cdr)
+}
+
+// LoadFileDescriptor is the same as the package function of the same name, but
+// any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+//
+// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// registering alternate paths is no longer useful or necessary.
+func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
+	return LoadFileDescriptor(filePath)
+}
+
+// LoadMessageDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking
+// files for the returned descriptor.
+// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) { + return LoadMessageDescriptor(msgName) +} + +// LoadMessageDescriptorForMessage is the same as the package function of the +// same name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) { + return LoadMessageDescriptorForMessage(msg) +} + +// LoadMessageDescriptorForType is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) { + return LoadMessageDescriptorForType(msgType) +} + +// LoadEnumDescriptorForEnum is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) { + return LoadEnumDescriptorForEnum(enum) +} + +// LoadEnumDescriptorForType is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) { + return LoadEnumDescriptorForType(enumType) +} + +// LoadFieldDescriptorForExtension is the same as the package function of the +// same name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) { + return LoadFieldDescriptorForExtension(ext) +} + +// CreateFileDescriptor is the same as the package function of the same name, +// but any alternate paths configured in this resolver are used when linking the +// given descriptor proto. 
+func (r *ImportResolver) CreateFileDescriptor(fdp *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) { + return createFileDescriptor(fdp, deps, r) +} + +// CreateFileDescriptors is the same as the package function of the same name, +// but any alternate paths configured in this resolver are used when linking the +// given descriptor protos. +func (r *ImportResolver) CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) { + return createFileDescriptors(fds, r) +} + +// CreateFileDescriptorFromSet is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking the descriptor protos in the given set. +func (r *ImportResolver) CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) { + return createFileDescriptorFromSet(fds, r) +} + +// CreateFileDescriptorsFromSet is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking the descriptor protos in the given set. +func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) { + return createFileDescriptorsFromSet(fds, r) +} + +const dotPrefix = "./" + +func clean(path string) string { + if path == "" { + return "" + } + path = filepath.ToSlash(filepath.Clean(path)) + if path == "." { + return "" + } + return strings.TrimPrefix(path, dotPrefix) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go new file mode 100644 index 00000000..aa8c3e99 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go @@ -0,0 +1,75 @@ +package internal + +import ( + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +// ProcessProto3OptionalFields adds synthetic oneofs to the given message descriptor +// for each proto3 optional field. It also updates the fields to have the correct +// oneof index reference. The given callback, if not nil, is called for each synthetic +// oneof created. +func ProcessProto3OptionalFields(msgd *descriptorpb.DescriptorProto, callback func(*descriptorpb.FieldDescriptorProto, *descriptorpb.OneofDescriptorProto)) { + var allNames map[string]struct{} + for _, fd := range msgd.Field { + if fd.GetProto3Optional() { + // lazy init the set of all names + if allNames == nil { + allNames = map[string]struct{}{} + for _, fd := range msgd.Field { + allNames[fd.GetName()] = struct{}{} + } + for _, od := range msgd.OneofDecl { + allNames[od.GetName()] = struct{}{} + } + // NB: protoc only considers names of other fields and oneofs + // when computing the synthetic oneof name. But that feels like + // a bug, since it means it could generate a name that conflicts + // with some other symbol defined in the message. If it's decided + // that's NOT a bug and is desirable, then we should remove the + // following four loops to mimic protoc's behavior. 
+ for _, xd := range msgd.Extension { + allNames[xd.GetName()] = struct{}{} + } + for _, ed := range msgd.EnumType { + allNames[ed.GetName()] = struct{}{} + for _, evd := range ed.Value { + allNames[evd.GetName()] = struct{}{} + } + } + for _, fd := range msgd.NestedType { + allNames[fd.GetName()] = struct{}{} + } + for _, n := range msgd.ReservedName { + allNames[n] = struct{}{} + } + } + + // Compute a name for the synthetic oneof. This uses the same + // algorithm as used in protoc: + // https://github.com/protocolbuffers/protobuf/blob/74ad62759e0a9b5a21094f3fb9bb4ebfaa0d1ab8/src/google/protobuf/compiler/parser.cc#L785-L803 + ooName := fd.GetName() + if !strings.HasPrefix(ooName, "_") { + ooName = "_" + ooName + } + for { + _, ok := allNames[ooName] + if !ok { + // found a unique name + allNames[ooName] = struct{}{} + break + } + ooName = "X" + ooName + } + + fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl))) + ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)} + msgd.OneofDecl = append(msgd.OneofDecl, ood) + if callback != nil { + callback(fd, ood) + } + } + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go new file mode 100644 index 00000000..9f160a3e --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go @@ -0,0 +1,48 @@ +package internal + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" +) + +func RegisterExtensionsForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { + registerTypesForFile(reg, fd, true, false) +} + +func RegisterTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { + registerTypesForFile(reg, fd, false, false) +} + +func registerTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor, extensionsOnly, publicImportsOnly bool) { + registerTypes(reg, fd, extensionsOnly) + for i := 0; i < fd.Imports().Len(); i++ { + imp := fd.Imports().Get(i) + if imp.IsPublic || !publicImportsOnly { + registerTypesForFile(reg, imp, extensionsOnly, true) + } + } +} + +func registerTypes(reg *protoregistry.Types, elem fileOrMessage, extensionsOnly bool) { + for i := 0; i < elem.Extensions().Len(); i++ { + _ = reg.RegisterExtension(dynamicpb.NewExtensionType(elem.Extensions().Get(i))) + } + if !extensionsOnly { + for i := 0; i < elem.Messages().Len(); i++ { + _ = reg.RegisterMessage(dynamicpb.NewMessageType(elem.Messages().Get(i))) + } + for i := 0; i < elem.Enums().Len(); i++ { + _ = reg.RegisterEnum(dynamicpb.NewEnumType(elem.Enums().Get(i))) + } + } + for i := 0; i < elem.Messages().Len(); i++ { + registerTypes(reg, elem.Messages().Get(i), extensionsOnly) + } +} + +type fileOrMessage interface { + Extensions() protoreflect.ExtensionDescriptors + Messages() protoreflect.MessageDescriptors + Enums() protoreflect.EnumDescriptors +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go new file mode 100644 index 00000000..60371288 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go @@ -0,0 +1,107 @@ +package internal + +import ( + "google.golang.org/protobuf/types/descriptorpb" +) + +// SourceInfoMap is a map of paths in a descriptor to the corresponding source +// code info. 
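+// Paths identify elements as described by SourceCodeInfo in descriptor.proto:
+// each path is a sequence of field numbers and indexes. For example, the path
+// [4, 0, 2, 3] refers to the fourth field (index 3) of the first message in a
+// file, since message_type is field 4 of FileDescriptorProto and field is
+// field 2 of DescriptorProto.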
+type SourceInfoMap map[string][]*descriptorpb.SourceCodeInfo_Location
+
+// Get returns the source code info for the given path. If there are
+// multiple locations for the same path, the first one is returned.
+func (m SourceInfoMap) Get(path []int32) *descriptorpb.SourceCodeInfo_Location {
+	v := m[asMapKey(path)]
+	if len(v) > 0 {
+		return v[0]
+	}
+	return nil
+}
+
+// GetAll returns all source code info for the given path.
+func (m SourceInfoMap) GetAll(path []int32) []*descriptorpb.SourceCodeInfo_Location {
+	return m[asMapKey(path)]
+}
+
+// Add stores the given source code info for the given path.
+func (m SourceInfoMap) Add(path []int32, loc *descriptorpb.SourceCodeInfo_Location) {
+	m[asMapKey(path)] = append(m[asMapKey(path)], loc)
+}
+
+// PutIfAbsent stores the given source code info for the given path only if the
+// given path does not exist in the map. This method returns true when the value
+// is stored, false if the path already exists.
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *descriptorpb.SourceCodeInfo_Location) bool {
+	k := asMapKey(path)
+	if _, ok := m[k]; ok {
+		return false
+	}
+	m[k] = []*descriptorpb.SourceCodeInfo_Location{loc}
+	return true
+}
+
+func asMapKey(slice []int32) string {
+	// NB: arrays should be usable as map keys, but this does not
+	// work due to a bug: https://github.com/golang/go/issues/22605
+	//rv := reflect.ValueOf(slice)
+	//arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem())
+	//array := reflect.New(arrayType).Elem()
+	//reflect.Copy(array, rv)
+	//return array.Interface()
+
+	b := make([]byte, len(slice)*4)
+	j := 0
+	for _, s := range slice {
+		b[j] = byte(s)
+		b[j+1] = byte(s >> 8)
+		b[j+2] = byte(s >> 16)
+		b[j+3] = byte(s >> 24)
+		j += 4
+	}
+	return string(b)
+}
+
+// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
+// source code info in the given file descriptor proto.
+func CreateSourceInfoMap(fd *descriptorpb.FileDescriptorProto) SourceInfoMap {
+	res := SourceInfoMap{}
+	PopulateSourceInfoMap(fd, res)
+	return res
+}
+
+// PopulateSourceInfoMap populates the given SourceInfoMap with information from
+// the given file descriptor.
+func PopulateSourceInfoMap(fd *descriptorpb.FileDescriptorProto, m SourceInfoMap) {
+	for _, l := range fd.GetSourceCodeInfo().GetLocation() {
+		m.Add(l.Path, l)
+	}
+}
+
+// NB: This wonkiness allows desc.Descriptor impls to implement an interface that
+// is only usable from this package, by embedding a SourceInfoComputeFunc that
+// implements the actual logic (which must live in desc package to avoid a
+// dependency cycle).
+
+// SourceInfoComputer is a single method which will be invoked to recompute
+// source info. This is needed for the protoparse package, which needs to link
+// descriptors without source info in order to interpret options, but then needs
+// to re-compute source info after that interpretation so that final linked
+// descriptors expose the right info.
+type SourceInfoComputer interface {
+	recomputeSourceInfo()
+}
+
+// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will
+// be aliased in the desc package to an unexported name so it is not marked as
+// an exported field in reflection and not present in Go docs.
+type SourceInfoComputeFunc func()
+
+func (f SourceInfoComputeFunc) recomputeSourceInfo() {
+	f()
+}
+
+// RecomputeSourceInfo is used to initiate recomputation of source info. This is
+// used by the protoparse package, after it interprets options.
+func RecomputeSourceInfo(c SourceInfoComputer) { + c.recomputeSourceInfo() +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go new file mode 100644 index 00000000..fcadbd1f --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go @@ -0,0 +1,293 @@ +package internal + +import ( + "math" + "unicode" + "unicode/utf8" +) + +const ( + // MaxNormalTag is the maximum allowed tag number for a field in a normal message. + MaxNormalTag = 536870911 // 2^29 - 1 + + // MaxMessageSetTag is the maximum allowed tag number of a field in a message that + // uses the message set wire format. + MaxMessageSetTag = math.MaxInt32 - 1 + + // MaxTag is the maximum allowed tag number. (It is the same as MaxMessageSetTag + // since that is the absolute highest allowed.) + MaxTag = MaxMessageSetTag + + // SpecialReservedStart is the first tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedStart = 19000 + // SpecialReservedEnd is the last tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedEnd = 19999 + + // NB: It would be nice to use constants from generated code instead of + // hard-coding these here. But code-gen does not emit these as constants + // anywhere. The only places they appear in generated code are struct tags + // on fields of the generated descriptor protos. + + // File_packageTag is the tag number of the package element in a file + // descriptor proto. + File_packageTag = 2 + // File_dependencyTag is the tag number of the dependencies element in a + // file descriptor proto. + File_dependencyTag = 3 + // File_messagesTag is the tag number of the messages element in a file + // descriptor proto. + File_messagesTag = 4 + // File_enumsTag is the tag number of the enums element in a file descriptor + // proto. + File_enumsTag = 5 + // File_servicesTag is the tag number of the services element in a file + // descriptor proto. + File_servicesTag = 6 + // File_extensionsTag is the tag number of the extensions element in a file + // descriptor proto. + File_extensionsTag = 7 + // File_optionsTag is the tag number of the options element in a file + // descriptor proto. + File_optionsTag = 8 + // File_syntaxTag is the tag number of the syntax element in a file + // descriptor proto. + File_syntaxTag = 12 + // Message_nameTag is the tag number of the name element in a message + // descriptor proto. + Message_nameTag = 1 + // Message_fieldsTag is the tag number of the fields element in a message + // descriptor proto. + Message_fieldsTag = 2 + // Message_nestedMessagesTag is the tag number of the nested messages + // element in a message descriptor proto. + Message_nestedMessagesTag = 3 + // Message_enumsTag is the tag number of the enums element in a message + // descriptor proto. + Message_enumsTag = 4 + // Message_extensionRangeTag is the tag number of the extension ranges + // element in a message descriptor proto. + Message_extensionRangeTag = 5 + // Message_extensionsTag is the tag number of the extensions element in a + // message descriptor proto. + Message_extensionsTag = 6 + // Message_optionsTag is the tag number of the options element in a message + // descriptor proto. + Message_optionsTag = 7 + // Message_oneOfsTag is the tag number of the one-ofs element in a message + // descriptor proto. 
+ Message_oneOfsTag = 8 + // Message_reservedRangeTag is the tag number of the reserved ranges element + // in a message descriptor proto. + Message_reservedRangeTag = 9 + // Message_reservedNameTag is the tag number of the reserved names element + // in a message descriptor proto. + Message_reservedNameTag = 10 + // ExtensionRange_startTag is the tag number of the start index in an + // extension range proto. + ExtensionRange_startTag = 1 + // ExtensionRange_endTag is the tag number of the end index in an + // extension range proto. + ExtensionRange_endTag = 2 + // ExtensionRange_optionsTag is the tag number of the options element in an + // extension range proto. + ExtensionRange_optionsTag = 3 + // ReservedRange_startTag is the tag number of the start index in a reserved + // range proto. + ReservedRange_startTag = 1 + // ReservedRange_endTag is the tag number of the end index in a reserved + // range proto. + ReservedRange_endTag = 2 + // Field_nameTag is the tag number of the name element in a field descriptor + // proto. + Field_nameTag = 1 + // Field_extendeeTag is the tag number of the extendee element in a field + // descriptor proto. + Field_extendeeTag = 2 + // Field_numberTag is the tag number of the number element in a field + // descriptor proto. + Field_numberTag = 3 + // Field_labelTag is the tag number of the label element in a field + // descriptor proto. + Field_labelTag = 4 + // Field_typeTag is the tag number of the type element in a field descriptor + // proto. + Field_typeTag = 5 + // Field_typeNameTag is the tag number of the type name element in a field + // descriptor proto. + Field_typeNameTag = 6 + // Field_defaultTag is the tag number of the default value element in a + // field descriptor proto. + Field_defaultTag = 7 + // Field_optionsTag is the tag number of the options element in a field + // descriptor proto. + Field_optionsTag = 8 + // Field_jsonNameTag is the tag number of the JSON name element in a field + // descriptor proto. + Field_jsonNameTag = 10 + // Field_proto3OptionalTag is the tag number of the proto3_optional element + // in a descriptor proto. + Field_proto3OptionalTag = 17 + // OneOf_nameTag is the tag number of the name element in a one-of + // descriptor proto. + OneOf_nameTag = 1 + // OneOf_optionsTag is the tag number of the options element in a one-of + // descriptor proto. + OneOf_optionsTag = 2 + // Enum_nameTag is the tag number of the name element in an enum descriptor + // proto. + Enum_nameTag = 1 + // Enum_valuesTag is the tag number of the values element in an enum + // descriptor proto. + Enum_valuesTag = 2 + // Enum_optionsTag is the tag number of the options element in an enum + // descriptor proto. + Enum_optionsTag = 3 + // Enum_reservedRangeTag is the tag number of the reserved ranges element in + // an enum descriptor proto. + Enum_reservedRangeTag = 4 + // Enum_reservedNameTag is the tag number of the reserved names element in + // an enum descriptor proto. + Enum_reservedNameTag = 5 + // EnumVal_nameTag is the tag number of the name element in an enum value + // descriptor proto. + EnumVal_nameTag = 1 + // EnumVal_numberTag is the tag number of the number element in an enum + // value descriptor proto. + EnumVal_numberTag = 2 + // EnumVal_optionsTag is the tag number of the options element in an enum + // value descriptor proto. + EnumVal_optionsTag = 3 + // Service_nameTag is the tag number of the name element in a service + // descriptor proto. 
+ Service_nameTag = 1 + // Service_methodsTag is the tag number of the methods element in a service + // descriptor proto. + Service_methodsTag = 2 + // Service_optionsTag is the tag number of the options element in a service + // descriptor proto. + Service_optionsTag = 3 + // Method_nameTag is the tag number of the name element in a method + // descriptor proto. + Method_nameTag = 1 + // Method_inputTag is the tag number of the input type element in a method + // descriptor proto. + Method_inputTag = 2 + // Method_outputTag is the tag number of the output type element in a method + // descriptor proto. + Method_outputTag = 3 + // Method_optionsTag is the tag number of the options element in a method + // descriptor proto. + Method_optionsTag = 4 + // Method_inputStreamTag is the tag number of the input stream flag in a + // method descriptor proto. + Method_inputStreamTag = 5 + // Method_outputStreamTag is the tag number of the output stream flag in a + // method descriptor proto. + Method_outputStreamTag = 6 + + // UninterpretedOptionsTag is the tag number of the uninterpreted options + // element. All *Options messages use the same tag for the field that stores + // uninterpreted options. + UninterpretedOptionsTag = 999 + + // Uninterpreted_nameTag is the tag number of the name element in an + // uninterpreted options proto. + Uninterpreted_nameTag = 2 + // Uninterpreted_identTag is the tag number of the identifier value in an + // uninterpreted options proto. + Uninterpreted_identTag = 3 + // Uninterpreted_posIntTag is the tag number of the positive int value in an + // uninterpreted options proto. + Uninterpreted_posIntTag = 4 + // Uninterpreted_negIntTag is the tag number of the negative int value in an + // uninterpreted options proto. + Uninterpreted_negIntTag = 5 + // Uninterpreted_doubleTag is the tag number of the double value in an + // uninterpreted options proto. + Uninterpreted_doubleTag = 6 + // Uninterpreted_stringTag is the tag number of the string value in an + // uninterpreted options proto. + Uninterpreted_stringTag = 7 + // Uninterpreted_aggregateTag is the tag number of the aggregate value in an + // uninterpreted options proto. + Uninterpreted_aggregateTag = 8 + // UninterpretedName_nameTag is the tag number of the name element in an + // uninterpreted option name proto. + UninterpretedName_nameTag = 1 +) + +// JsonName returns the default JSON name for a field with the given name. +// This mirrors the algorithm in protoc: +// +// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95 +func JsonName(name string) string { + var js []rune + nextUpper := false + for _, r := range name { + if r == '_' { + nextUpper = true + continue + } + if nextUpper { + nextUpper = false + js = append(js, unicode.ToUpper(r)) + } else { + js = append(js, r) + } + } + return string(js) +} + +// InitCap returns the given field name, but with the first letter capitalized. +func InitCap(name string) string { + r, sz := utf8.DecodeRuneInString(name) + return string(unicode.ToUpper(r)) + name[sz:] +} + +// CreatePrefixList returns a list of package prefixes to search when resolving +// a symbol name. If the given package is blank, it returns only the empty +// string. If the given package contains only one token, e.g. "foo", it returns +// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns +// successively shorter prefixes of the package and then the empty string. 
For
+// example, for a package named "foo.bar.baz" it will return the following list:
+//
+//	["foo.bar.baz", "foo.bar", "foo", ""]
+func CreatePrefixList(pkg string) []string {
+	if pkg == "" {
+		return []string{""}
+	}
+
+	numDots := 0
+	// one pass to pre-allocate the returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			numDots++
+		}
+	}
+	if numDots == 0 {
+		return []string{pkg, ""}
+	}
+
+	prefixes := make([]string, numDots+2)
+	// second pass to fill in returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			prefixes[numDots] = pkg[:i]
+			numDots--
+		}
+	}
+	prefixes[0] = pkg
+
+	return prefixes
+}
+
+// GetMaxTag returns the max tag number allowed, based on whether a message uses
+// message set wire format or not.
+func GetMaxTag(isMessageSet bool) int32 {
+	if isMessageSet {
+		return MaxMessageSetTag
+	}
+	return MaxNormalTag
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
new file mode 100644
index 00000000..193bbe88
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -0,0 +1,264 @@
+package desc
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/jhump/protoreflect/desc/sourceinfo"
+	"github.com/jhump/protoreflect/internal"
+)
+
+// The global cache is used to store descriptors that wrap items in
+// protoregistry.GlobalTypes and protoregistry.GlobalFiles. This prevents
+// repeating work to re-wrap underlying global descriptors.
+var (
+	// We put all wrapped file and message descriptors in this cache.
+	loadedDescriptors = lockingCache{cache: mapCache{}}
+
+	// Unfortunately, we need a different mechanism for enums for
+	// compatibility with old APIs, which required that they were
+	// registered in a different way :(
+	loadedEnumsMu sync.RWMutex
+	loadedEnums   = map[reflect.Type]*EnumDescriptor{}
+)
+
+// LoadFileDescriptor creates a file descriptor using the bytes returned by
+// proto.FileDescriptor. Descriptors are cached so that they do not need to be
+// re-processed if the same file is fetched again later.
+func LoadFileDescriptor(file string) (*FileDescriptor, error) {
+	d, err := sourceinfo.GlobalFiles.FindFileByPath(file)
+	if err == protoregistry.NotFound {
+		// for backwards compatibility, see if this matches a known old
+		// alias for the file (older versions of libraries that registered
+		// the files using incorrect/non-canonical paths)
+		if alt := internal.StdFileAliases[file]; alt != "" {
+			d, err = sourceinfo.GlobalFiles.FindFileByPath(alt)
+		}
+	}
+	if err != nil {
+		if err == protoregistry.NotFound {
+			return nil, internal.ErrNoSuchFile(file)
+		}
+		return nil, err
+	}
+	if fd := loadedDescriptors.get(d); fd != nil {
+		return fd.(*FileDescriptor), nil
+	}
+
+	var fd *FileDescriptor
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		// double-check cache, in case it was concurrently added while
+		// we were waiting for the lock
+		f := cache.get(d)
+		if f != nil {
+			fd = f.(*FileDescriptor)
+			return
+		}
+		fd, err = wrapFile(d, cache)
+	})
+	return fd, err
+}
+
+// LoadMessageDescriptor loads descriptor using the encoded descriptor proto returned by
+// Message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
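+// For example, LoadMessageDescriptor("google.protobuf.Duration") returns the
+// descriptor for the well-known Duration message, provided its generated
+// package (such as durationpb) has been linked into the binary.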
+func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
+	mt, err := sourceinfo.GlobalTypes.FindMessageByName(protoreflect.FullName(message))
+	if err != nil {
+		if err == protoregistry.NotFound {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return loadMessageDescriptor(mt.Descriptor())
+}
+
+func loadMessageDescriptor(md protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+	d := loadedDescriptors.get(md)
+	if d != nil {
+		return d.(*MessageDescriptor), nil
+	}
+
+	var err error
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		d, err = wrapMessage(md, cache)
+	})
+	if err != nil {
+		return nil, err
+	}
+	return d.(*MessageDescriptor), err
+}
+
+// LoadMessageDescriptorForType loads descriptor using the encoded descriptor proto returned
+// by message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
+	m, err := messageFromType(messageType)
+	if err != nil {
+		return nil, err
+	}
+	return LoadMessageDescriptorForMessage(m)
+}
+
+// LoadMessageDescriptorForMessage loads descriptor using the encoded descriptor proto
+// returned by message.Descriptor(). If the given type is not recognized, then a nil
+// descriptor is returned.
+func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
+	// efficiently handle dynamic messages
+	type descriptorable interface {
+		GetMessageDescriptor() *MessageDescriptor
+	}
+	if d, ok := message.(descriptorable); ok {
+		return d.GetMessageDescriptor(), nil
+	}
+
+	var md protoreflect.MessageDescriptor
+	if m, ok := message.(protoreflect.ProtoMessage); ok {
+		md = m.ProtoReflect().Descriptor()
+	} else {
+		md = proto.MessageReflect(message).Descriptor()
+	}
+	return loadMessageDescriptor(sourceinfo.WrapMessage(md))
+}
+
+func messageFromType(mt reflect.Type) (proto.Message, error) {
+	if mt.Kind() != reflect.Ptr {
+		mt = reflect.PtrTo(mt)
+	}
+	m, ok := reflect.Zero(mt).Interface().(proto.Message)
+	if !ok {
+		return nil, fmt.Errorf("failed to create message from type: %v", mt)
+	}
+	return m, nil
+}
+
+// interface implemented by all generated enums
+type protoEnum interface {
+	EnumDescriptor() ([]byte, []int)
+}
+
+// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because
+// it is not useful since protoc-gen-go does not expose the name anywhere in generated
+// code or register it in a way that it is accessible for reflection code. This also
+// means we have to cache enum descriptors differently -- we can only cache them as
+// they are requested, as opposed to caching all enum types whenever a file descriptor
+// is cached. This is because we need to know the generated type of the enums, and we
+// don't know that at the time of caching file descriptors.
+
+// LoadEnumDescriptorForType loads descriptor using the encoded descriptor proto returned
+// by enum.EnumDescriptor() for the given enum type.
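+// The given type may be the enum's value type (e.g. reflect.TypeOf(SomeEnum(0)),
+// where SomeEnum is a generated enum type) or a pointer to it; either way, the
+// resulting descriptor is cached under the non-pointer type.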
+func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	// we cache descriptors using non-pointer type
+	if enumType.Kind() == reflect.Ptr {
+		enumType = enumType.Elem()
+	}
+	e := getEnumFromCache(enumType)
+	if e != nil {
+		return e, nil
+	}
+	enum, err := enumFromType(enumType)
+	if err != nil {
+		return nil, err
+	}
+
+	return loadEnumDescriptor(enumType, enum)
+}
+
+func getEnumFromCache(t reflect.Type) *EnumDescriptor {
+	loadedEnumsMu.RLock()
+	defer loadedEnumsMu.RUnlock()
+	return loadedEnums[t]
+}
+
+func putEnumInCache(t reflect.Type, d *EnumDescriptor) {
+	loadedEnumsMu.Lock()
+	defer loadedEnumsMu.Unlock()
+	loadedEnums[t] = d
+}
+
+// LoadEnumDescriptorForEnum loads descriptor using the encoded descriptor proto
+// returned by enum.EnumDescriptor().
+func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	et := reflect.TypeOf(enum)
+	// we cache descriptors using non-pointer type
+	if et.Kind() == reflect.Ptr {
+		et = et.Elem()
+		enum = reflect.Zero(et).Interface().(protoEnum)
+	}
+	e := getEnumFromCache(et)
+	if e != nil {
+		return e, nil
+	}
+
+	return loadEnumDescriptor(et, enum)
+}
+
+func enumFromType(et reflect.Type) (protoEnum, error) {
+	e, ok := reflect.Zero(et).Interface().(protoEnum)
+	if !ok {
+		// if the value type does not implement protoEnum, try the pointer type
+		if et.Kind() != reflect.Ptr {
+			et = reflect.PtrTo(et)
+		}
+		e, ok = reflect.Zero(et).Interface().(protoEnum)
+	}
+	if !ok {
+		return nil, fmt.Errorf("failed to create enum from type: %v", et)
+	}
+	return e, nil
+}
+
+func getDescriptorForEnum(enum protoEnum) (*descriptorpb.FileDescriptorProto, []int, error) {
+	fdb, path := enum.EnumDescriptor()
+	name := fmt.Sprintf("%T", enum)
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	return fd, path, err
+}
+
+func loadEnumDescriptor(et reflect.Type, enum protoEnum) (*EnumDescriptor, error) {
+	fdp, path, err := getDescriptorForEnum(enum)
+	if err != nil {
+		return nil, err
+	}
+
+	fd, err := LoadFileDescriptor(fdp.GetName())
+	if err != nil {
+		return nil, err
+	}
+
+	ed := findEnum(fd, path)
+	putEnumInCache(et, ed)
+	return ed, nil
+}
+
+func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
+	if len(path) == 1 {
+		return fd.GetEnumTypes()[path[0]]
+	}
+	md := fd.GetMessageTypes()[path[0]]
+	for _, i := range path[1 : len(path)-1] {
+		md = md.GetNestedMessageTypes()[i]
+	}
+	return md.GetNestedEnumTypes()[path[len(path)-1]]
+}
+
+// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
+// extension description.
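+// The returned descriptor is validated against the given ExtensionDesc: it
+// must be an extension field whose owner matches the extended message type and
+// whose number matches the extension's field number.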
+func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) { + file, err := LoadFileDescriptor(ext.Filename) + if err != nil { + return nil, err + } + field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor) + // make sure descriptor agrees with attributes of the ExtensionDesc + if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) || + field.GetNumber() != ext.Field { + return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name) + } + return field, nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go new file mode 100644 index 00000000..0bc09387 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go @@ -0,0 +1,724 @@ +package protoparse + +import ( + "fmt" + + "github.com/bufbuild/protocompile/ast" + + ast2 "github.com/jhump/protoreflect/desc/protoparse/ast" +) + +func convertAST(file *ast.FileNode) *ast2.FileNode { + elements := make([]ast2.FileElement, len(file.Decls)) + for i := range file.Decls { + elements[i] = convertASTFileElement(file, file.Decls[i]) + } + root := ast2.NewFileNode(convertASTSyntax(file, file.Syntax), elements) + eofInfo := file.NodeInfo(file.EOF) + root.FinalComments = convertASTComments(eofInfo.LeadingComments()) + root.FinalWhitespace = eofInfo.LeadingWhitespace() + return root +} + +func convertASTSyntax(f *ast.FileNode, s *ast.SyntaxNode) *ast2.SyntaxNode { + return ast2.NewSyntaxNode( + convertASTKeyword(f, s.Keyword), + convertASTRune(f, s.Equals), + convertASTString(f, s.Syntax), + convertASTRune(f, s.Semicolon), + ) +} + +func convertASTFileElement(f *ast.FileNode, el ast.FileElement) ast2.FileElement { + switch el := el.(type) { + case *ast.ImportNode: + return convertASTImport(f, el) + case *ast.PackageNode: + return convertASTPackage(f, el) + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.MessageNode: + return convertASTMessage(f, el) + case *ast.EnumNode: + return convertASTEnum(f, el) + case *ast.ExtendNode: + return convertASTExtend(f, el) + case *ast.ServiceNode: + return convertASTService(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.FileElement: %T", el)) + } +} + +func convertASTImport(f *ast.FileNode, imp *ast.ImportNode) *ast2.ImportNode { + var public, weak *ast2.KeywordNode + if imp.Public != nil { + public = convertASTKeyword(f, imp.Public) + } + if imp.Weak != nil { + weak = convertASTKeyword(f, imp.Weak) + } + return ast2.NewImportNode( + convertASTKeyword(f, imp.Keyword), + public, weak, + convertASTString(f, imp.Name), + convertASTRune(f, imp.Semicolon), + ) +} + +func convertASTPackage(f *ast.FileNode, p *ast.PackageNode) *ast2.PackageNode { + return ast2.NewPackageNode( + convertASTKeyword(f, p.Keyword), + convertASTIdent(f, p.Name), + convertASTRune(f, p.Semicolon), + ) +} + +func convertASTOption(f *ast.FileNode, o *ast.OptionNode) *ast2.OptionNode { + if o.Keyword == nil { + return ast2.NewCompactOptionNode( + convertASTOptionName(f, o.Name), + convertASTRune(f, o.Equals), + convertASTValue(f, o.Val), + ) + } + return ast2.NewOptionNode( + convertASTKeyword(f, o.Keyword), + convertASTOptionName(f, o.Name), + convertASTRune(f, o.Equals), + convertASTValue(f, o.Val), + convertASTRune(f, o.Semicolon), + ) +} + +func convertASTOptionName(f *ast.FileNode, n *ast.OptionNameNode) *ast2.OptionNameNode { + parts 
:= make([]*ast2.FieldReferenceNode, len(n.Parts)) + for i := range n.Parts { + parts[i] = convertASTFieldReference(f, n.Parts[i]) + } + dots := make([]*ast2.RuneNode, len(n.Dots)) + for i := range n.Dots { + dots[i] = convertASTRune(f, n.Dots[i]) + } + return ast2.NewOptionNameNode(parts, dots) +} + +func convertASTFieldReference(f *ast.FileNode, n *ast.FieldReferenceNode) *ast2.FieldReferenceNode { + switch { + case n.IsExtension(): + return ast2.NewExtensionFieldReferenceNode( + convertASTRune(f, n.Open), + convertASTIdent(f, n.Name), + convertASTRune(f, n.Close), + ) + case n.IsAnyTypeReference(): + return ast2.NewAnyTypeReferenceNode( + convertASTRune(f, n.Open), + convertASTIdent(f, n.URLPrefix), + convertASTRune(f, n.Slash), + convertASTIdent(f, n.Name), + convertASTRune(f, n.Close), + ) + default: + return ast2.NewFieldReferenceNode(convertASTIdent(f, n.Name).(*ast2.IdentNode)) + } +} + +func convertASTMessage(f *ast.FileNode, m *ast.MessageNode) *ast2.MessageNode { + decls := make([]ast2.MessageElement, len(m.Decls)) + for i := range m.Decls { + decls[i] = convertASTMessageElement(f, m.Decls[i]) + } + return ast2.NewMessageNode( + convertASTKeyword(f, m.Keyword), + convertASTIdentToken(f, m.Name), + convertASTRune(f, m.OpenBrace), + decls, + convertASTRune(f, m.CloseBrace), + ) +} + +func convertASTMessageElement(f *ast.FileNode, el ast.MessageElement) ast2.MessageElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.FieldNode: + return convertASTField(f, el) + case *ast.MapFieldNode: + return convertASTMapField(f, el) + case *ast.OneOfNode: + return convertASTOneOf(f, el) + case *ast.GroupNode: + return convertASTGroup(f, el) + case *ast.MessageNode: + return convertASTMessage(f, el) + case *ast.EnumNode: + return convertASTEnum(f, el) + case *ast.ExtendNode: + return convertASTExtend(f, el) + case *ast.ExtensionRangeNode: + return convertASTExtensions(f, el) + case *ast.ReservedNode: + return convertASTReserved(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.MessageElement: %T", el)) + } +} + +func convertASTField(f *ast.FileNode, fld *ast.FieldNode) *ast2.FieldNode { + var lbl *ast2.KeywordNode + if fld.Label.KeywordNode != nil { + lbl = convertASTKeyword(f, fld.Label.KeywordNode) + } + var opts *ast2.CompactOptionsNode + if fld.Options != nil { + opts = convertASTCompactOptions(f, fld.Options) + } + return ast2.NewFieldNode( + lbl, + convertASTIdent(f, fld.FldType), + convertASTIdentToken(f, fld.Name), + convertASTRune(f, fld.Equals), + convertASTUintLiteral(f, fld.Tag), + opts, + convertASTRune(f, fld.Semicolon), + ) +} + +func convertASTMapField(f *ast.FileNode, fld *ast.MapFieldNode) *ast2.MapFieldNode { + var opts *ast2.CompactOptionsNode + if fld.Options != nil { + opts = convertASTCompactOptions(f, fld.Options) + } + return ast2.NewMapFieldNode( + convertASTMapFieldType(f, fld.MapType), + convertASTIdentToken(f, fld.Name), + convertASTRune(f, fld.Equals), + convertASTUintLiteral(f, fld.Tag), + opts, + convertASTRune(f, fld.Semicolon), + ) +} + +func convertASTMapFieldType(f *ast.FileNode, t *ast.MapTypeNode) *ast2.MapTypeNode { + return ast2.NewMapTypeNode( + convertASTKeyword(f, t.Keyword), + convertASTRune(f, t.OpenAngle), + convertASTIdentToken(f, t.KeyType), + convertASTRune(f, t.Comma), + convertASTIdent(f, t.ValueType), + convertASTRune(f, t.CloseAngle), + ) +} + +func convertASTGroup(f *ast.FileNode, g *ast.GroupNode) *ast2.GroupNode { + 
var lbl *ast2.KeywordNode + if g.Label.KeywordNode != nil { + lbl = convertASTKeyword(f, g.Label.KeywordNode) + } + var opts *ast2.CompactOptionsNode + if g.Options != nil { + opts = convertASTCompactOptions(f, g.Options) + } + decls := make([]ast2.MessageElement, len(g.Decls)) + for i := range g.Decls { + decls[i] = convertASTMessageElement(f, g.Decls[i]) + } + return ast2.NewGroupNode( + lbl, + convertASTKeyword(f, g.Keyword), + convertASTIdentToken(f, g.Name), + convertASTRune(f, g.Equals), + convertASTUintLiteral(f, g.Tag), + opts, + convertASTRune(f, g.OpenBrace), + decls, + convertASTRune(f, g.CloseBrace), + ) +} + +func convertASTOneOf(f *ast.FileNode, oo *ast.OneOfNode) *ast2.OneOfNode { + decls := make([]ast2.OneOfElement, len(oo.Decls)) + for i := range oo.Decls { + decls[i] = convertASTOneOfElement(f, oo.Decls[i]) + } + return ast2.NewOneOfNode( + convertASTKeyword(f, oo.Keyword), + convertASTIdentToken(f, oo.Name), + convertASTRune(f, oo.OpenBrace), + decls, + convertASTRune(f, oo.CloseBrace), + ) +} + +func convertASTOneOfElement(f *ast.FileNode, el ast.OneOfElement) ast2.OneOfElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.FieldNode: + return convertASTField(f, el) + case *ast.GroupNode: + return convertASTGroup(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.OneOfElement: %T", el)) + } +} + +func convertASTExtensions(f *ast.FileNode, e *ast.ExtensionRangeNode) *ast2.ExtensionRangeNode { + var opts *ast2.CompactOptionsNode + if e.Options != nil { + opts = convertASTCompactOptions(f, e.Options) + } + ranges := make([]*ast2.RangeNode, len(e.Ranges)) + for i := range e.Ranges { + ranges[i] = convertASTRange(f, e.Ranges[i]) + } + commas := make([]*ast2.RuneNode, len(e.Commas)) + for i := range e.Commas { + commas[i] = convertASTRune(f, e.Commas[i]) + } + return ast2.NewExtensionRangeNode( + convertASTKeyword(f, e.Keyword), + ranges, commas, opts, + convertASTRune(f, e.Semicolon), + ) +} + +func convertASTReserved(f *ast.FileNode, r *ast.ReservedNode) *ast2.ReservedNode { + ranges := make([]*ast2.RangeNode, len(r.Ranges)) + for i := range r.Ranges { + ranges[i] = convertASTRange(f, r.Ranges[i]) + } + commas := make([]*ast2.RuneNode, len(r.Commas)) + for i := range r.Commas { + commas[i] = convertASTRune(f, r.Commas[i]) + } + names := make([]ast2.StringValueNode, len(r.Names)) + for i := range r.Names { + names[i] = convertASTString(f, r.Names[i]) + } + if len(r.Ranges) > 0 { + return ast2.NewReservedRangesNode( + convertASTKeyword(f, r.Keyword), + ranges, commas, + convertASTRune(f, r.Semicolon), + ) + } + return ast2.NewReservedNamesNode( + convertASTKeyword(f, r.Keyword), + names, commas, + convertASTRune(f, r.Semicolon), + ) +} + +func convertASTRange(f *ast.FileNode, r *ast.RangeNode) *ast2.RangeNode { + var to, max *ast2.KeywordNode + var end ast2.IntValueNode + if r.To != nil { + to = convertASTKeyword(f, r.To) + } + if r.Max != nil { + max = convertASTKeyword(f, r.Max) + } + if r.EndVal != nil { + end = convertASTInt(f, r.EndVal) + } + return ast2.NewRangeNode( + convertASTInt(f, r.StartVal), + to, end, max, + ) +} + +func convertASTEnum(f *ast.FileNode, e *ast.EnumNode) *ast2.EnumNode { + decls := make([]ast2.EnumElement, len(e.Decls)) + for i := range e.Decls { + decls[i] = convertASTEnumElement(f, e.Decls[i]) + } + return ast2.NewEnumNode( + convertASTKeyword(f, e.Keyword), + convertASTIdentToken(f, e.Name), + convertASTRune(f, 
e.OpenBrace), + decls, + convertASTRune(f, e.CloseBrace), + ) +} + +func convertASTEnumElement(f *ast.FileNode, el ast.EnumElement) ast2.EnumElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.EnumValueNode: + return convertASTEnumValue(f, el) + case *ast.ReservedNode: + return convertASTReserved(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.EnumElement: %T", el)) + } +} + +func convertASTEnumValue(f *ast.FileNode, e *ast.EnumValueNode) *ast2.EnumValueNode { + var opts *ast2.CompactOptionsNode + if e.Options != nil { + opts = convertASTCompactOptions(f, e.Options) + } + return ast2.NewEnumValueNode( + convertASTIdentToken(f, e.Name), + convertASTRune(f, e.Equals), + convertASTInt(f, e.Number), + opts, + convertASTRune(f, e.Semicolon), + ) +} + +func convertASTExtend(f *ast.FileNode, e *ast.ExtendNode) *ast2.ExtendNode { + decls := make([]ast2.ExtendElement, len(e.Decls)) + for i := range e.Decls { + decls[i] = convertASTExtendElement(f, e.Decls[i]) + } + return ast2.NewExtendNode( + convertASTKeyword(f, e.Keyword), + convertASTIdent(f, e.Extendee), + convertASTRune(f, e.OpenBrace), + decls, + convertASTRune(f, e.CloseBrace), + ) +} + +func convertASTExtendElement(f *ast.FileNode, el ast.ExtendElement) ast2.ExtendElement { + switch el := el.(type) { + case *ast.FieldNode: + return convertASTField(f, el) + case *ast.GroupNode: + return convertASTGroup(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.ExtendElement: %T", el)) + } +} + +func convertASTService(f *ast.FileNode, s *ast.ServiceNode) *ast2.ServiceNode { + decls := make([]ast2.ServiceElement, len(s.Decls)) + for i := range s.Decls { + decls[i] = convertASTServiceElement(f, s.Decls[i]) + } + return ast2.NewServiceNode( + convertASTKeyword(f, s.Keyword), + convertASTIdentToken(f, s.Name), + convertASTRune(f, s.OpenBrace), + decls, + convertASTRune(f, s.CloseBrace), + ) +} + +func convertASTServiceElement(f *ast.FileNode, el ast.ServiceElement) ast2.ServiceElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.RPCNode: + return convertASTMethod(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.ServiceElement: %T", el)) + } +} + +func convertASTMethod(f *ast.FileNode, m *ast.RPCNode) *ast2.RPCNode { + if m.OpenBrace == nil { + return ast2.NewRPCNode( + convertASTKeyword(f, m.Keyword), + convertASTIdentToken(f, m.Name), + convertASTMethodType(f, m.Input), + convertASTKeyword(f, m.Returns), + convertASTMethodType(f, m.Output), + convertASTRune(f, m.Semicolon), + ) + } + decls := make([]ast2.RPCElement, len(m.Decls)) + for i := range m.Decls { + decls[i] = convertASTMethodElement(f, m.Decls[i]) + } + return ast2.NewRPCNodeWithBody( + convertASTKeyword(f, m.Keyword), + convertASTIdentToken(f, m.Name), + convertASTMethodType(f, m.Input), + convertASTKeyword(f, m.Returns), + convertASTMethodType(f, m.Output), + convertASTRune(f, m.OpenBrace), + decls, + convertASTRune(f, m.CloseBrace), + ) +} + +func convertASTMethodElement(f *ast.FileNode, el ast.RPCElement) ast2.RPCElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.RPCElement: %T", el)) + } +} + +func 
convertASTMethodType(f *ast.FileNode, t *ast.RPCTypeNode) *ast2.RPCTypeNode { + var stream *ast2.KeywordNode + if t.Stream != nil { + stream = convertASTKeyword(f, t.Stream) + } + return ast2.NewRPCTypeNode( + convertASTRune(f, t.OpenParen), + stream, + convertASTIdent(f, t.MessageType), + convertASTRune(f, t.CloseParen), + ) +} + +func convertASTCompactOptions(f *ast.FileNode, opts *ast.CompactOptionsNode) *ast2.CompactOptionsNode { + elems := make([]*ast2.OptionNode, len(opts.Options)) + for i := range opts.Options { + elems[i] = convertASTOption(f, opts.Options[i]) + } + commas := make([]*ast2.RuneNode, len(opts.Commas)) + for i := range opts.Commas { + commas[i] = convertASTRune(f, opts.Commas[i]) + } + return ast2.NewCompactOptionsNode( + convertASTRune(f, opts.OpenBracket), + elems, commas, + convertASTRune(f, opts.CloseBracket), + ) +} + +func convertASTEmpty(f *ast.FileNode, e *ast.EmptyDeclNode) *ast2.EmptyDeclNode { + return ast2.NewEmptyDeclNode(convertASTRune(f, e.Semicolon)) +} + +func convertASTValue(f *ast.FileNode, v ast.ValueNode) ast2.ValueNode { + switch v := v.(type) { + case *ast.IdentNode: + return convertASTIdentToken(f, v) + case *ast.CompoundIdentNode: + return convertASTCompoundIdent(f, v) + case *ast.StringLiteralNode: + return convertASTStringLiteral(f, v) + case *ast.CompoundStringLiteralNode: + return convertASTCompoundStringLiteral(f, v) + case *ast.UintLiteralNode: + return convertASTUintLiteral(f, v) + case *ast.PositiveUintLiteralNode: + return convertASTPositiveUintLiteral(f, v) + case *ast.NegativeIntLiteralNode: + return convertASTNegativeIntLiteral(f, v) + case *ast.FloatLiteralNode: + return convertASTFloatLiteral(f, v) + case *ast.SpecialFloatLiteralNode: + return convertASTSpecialFloatLiteral(f, v) + case *ast.SignedFloatLiteralNode: + return convertASTSignedFloatLiteral(f, v) + case *ast.ArrayLiteralNode: + return convertASTArrayLiteral(f, v) + case *ast.MessageLiteralNode: + return convertASTMessageLiteral(f, v) + default: + panic(fmt.Sprintf("unrecognized type of ast.ValueNode: %T", v)) + } +} + +func convertASTIdent(f *ast.FileNode, ident ast.IdentValueNode) ast2.IdentValueNode { + switch ident := ident.(type) { + case *ast.IdentNode: + return convertASTIdentToken(f, ident) + case *ast.CompoundIdentNode: + return convertASTCompoundIdent(f, ident) + default: + panic(fmt.Sprintf("unrecognized type of ast.IdentValueNode: %T", ident)) + } +} + +func convertASTIdentToken(f *ast.FileNode, ident *ast.IdentNode) *ast2.IdentNode { + return ast2.NewIdentNode(ident.Val, convertASTTokenInfo(f, ident.Token())) +} + +func convertASTCompoundIdent(f *ast.FileNode, ident *ast.CompoundIdentNode) *ast2.CompoundIdentNode { + var leadingDot *ast2.RuneNode + if ident.LeadingDot != nil { + leadingDot = convertASTRune(f, ident.LeadingDot) + } + components := make([]*ast2.IdentNode, len(ident.Components)) + for i := range ident.Components { + components[i] = convertASTIdentToken(f, ident.Components[i]) + } + dots := make([]*ast2.RuneNode, len(ident.Dots)) + for i := range ident.Dots { + dots[i] = convertASTRune(f, ident.Dots[i]) + } + return ast2.NewCompoundIdentNode(leadingDot, components, dots) +} + +func convertASTString(f *ast.FileNode, str ast.StringValueNode) ast2.StringValueNode { + switch str := str.(type) { + case *ast.StringLiteralNode: + return convertASTStringLiteral(f, str) + case *ast.CompoundStringLiteralNode: + return convertASTCompoundStringLiteral(f, str) + default: + panic(fmt.Sprintf("unrecognized type of ast.StringValueNode: %T", str)) + } +} + 
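+// The converters below handle terminal (token) nodes: each copies the parsed
+// value verbatim and carries position, raw text, and attached comments over
+// via convertASTTokenInfo.
+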
+func convertASTStringLiteral(f *ast.FileNode, str *ast.StringLiteralNode) *ast2.StringLiteralNode { + return ast2.NewStringLiteralNode(str.Val, convertASTTokenInfo(f, str.Token())) +} + +func convertASTCompoundStringLiteral(f *ast.FileNode, str *ast.CompoundStringLiteralNode) *ast2.CompoundStringLiteralNode { + children := str.Children() + components := make([]*ast2.StringLiteralNode, len(children)) + for i := range children { + components[i] = convertASTStringLiteral(f, children[i].(*ast.StringLiteralNode)) + } + return ast2.NewCompoundLiteralStringNode(components...) +} + +func convertASTInt(f *ast.FileNode, n ast.IntValueNode) ast2.IntValueNode { + switch n := n.(type) { + case *ast.UintLiteralNode: + return convertASTUintLiteral(f, n) + case *ast.PositiveUintLiteralNode: + return convertASTPositiveUintLiteral(f, n) + case *ast.NegativeIntLiteralNode: + return convertASTNegativeIntLiteral(f, n) + default: + panic(fmt.Sprintf("unrecognized type of ast.IntValueNode: %T", n)) + } +} + +func convertASTUintLiteral(f *ast.FileNode, n *ast.UintLiteralNode) *ast2.UintLiteralNode { + return ast2.NewUintLiteralNode(n.Val, convertASTTokenInfo(f, n.Token())) +} + +func convertASTPositiveUintLiteral(f *ast.FileNode, n *ast.PositiveUintLiteralNode) *ast2.PositiveUintLiteralNode { + return ast2.NewPositiveUintLiteralNode(convertASTRune(f, n.Plus), convertASTUintLiteral(f, n.Uint)) +} + +func convertASTNegativeIntLiteral(f *ast.FileNode, n *ast.NegativeIntLiteralNode) *ast2.NegativeIntLiteralNode { + return ast2.NewNegativeIntLiteralNode(convertASTRune(f, n.Minus), convertASTUintLiteral(f, n.Uint)) +} + +func convertASTFloat(f *ast.FileNode, n ast.FloatValueNode) ast2.FloatValueNode { + switch n := n.(type) { + case *ast.FloatLiteralNode: + return convertASTFloatLiteral(f, n) + case *ast.SpecialFloatLiteralNode: + return convertASTSpecialFloatLiteral(f, n) + case *ast.UintLiteralNode: + return convertASTUintLiteral(f, n) + default: + panic(fmt.Sprintf("unrecognized type of ast.FloatValueNode: %T", n)) + } +} + +func convertASTFloatLiteral(f *ast.FileNode, n *ast.FloatLiteralNode) *ast2.FloatLiteralNode { + return ast2.NewFloatLiteralNode(n.Val, convertASTTokenInfo(f, n.Token())) +} + +func convertASTSpecialFloatLiteral(f *ast.FileNode, n *ast.SpecialFloatLiteralNode) *ast2.SpecialFloatLiteralNode { + return ast2.NewSpecialFloatLiteralNode(convertASTKeyword(f, n.KeywordNode)) +} + +func convertASTSignedFloatLiteral(f *ast.FileNode, n *ast.SignedFloatLiteralNode) *ast2.SignedFloatLiteralNode { + return ast2.NewSignedFloatLiteralNode(convertASTRune(f, n.Sign), convertASTFloat(f, n.Float)) +} + +func convertASTArrayLiteral(f *ast.FileNode, ar *ast.ArrayLiteralNode) *ast2.ArrayLiteralNode { + vals := make([]ast2.ValueNode, len(ar.Elements)) + for i := range ar.Elements { + vals[i] = convertASTValue(f, ar.Elements[i]) + } + commas := make([]*ast2.RuneNode, len(ar.Commas)) + for i := range ar.Commas { + commas[i] = convertASTRune(f, ar.Commas[i]) + } + return ast2.NewArrayLiteralNode( + convertASTRune(f, ar.OpenBracket), + vals, commas, + convertASTRune(f, ar.CloseBracket), + ) +} + +func convertASTMessageLiteral(f *ast.FileNode, m *ast.MessageLiteralNode) *ast2.MessageLiteralNode { + fields := make([]*ast2.MessageFieldNode, len(m.Elements)) + for i := range m.Elements { + fields[i] = convertASTMessageLiteralField(f, m.Elements[i]) + } + seps := make([]*ast2.RuneNode, len(m.Seps)) + for i := range m.Seps { + if m.Seps[i] != nil { + seps[i] = convertASTRune(f, m.Seps[i]) + } + } + return 
ast2.NewMessageLiteralNode(
+		convertASTRune(f, m.Open),
+		fields, seps,
+		convertASTRune(f, m.Close),
+	)
+}
+
+func convertASTMessageLiteralField(f *ast.FileNode, fld *ast.MessageFieldNode) *ast2.MessageFieldNode {
+	var sep *ast2.RuneNode
+	if fld.Sep != nil {
+		sep = convertASTRune(f, fld.Sep)
+	}
+	return ast2.NewMessageFieldNode(
+		convertASTFieldReference(f, fld.Name),
+		sep,
+		convertASTValue(f, fld.Val),
+	)
+}
+
+func convertASTKeyword(f *ast.FileNode, k *ast.KeywordNode) *ast2.KeywordNode {
+	return ast2.NewKeywordNode(k.Val, convertASTTokenInfo(f, k.Token()))
+}
+
+func convertASTRune(f *ast.FileNode, r *ast.RuneNode) *ast2.RuneNode {
+	return ast2.NewRuneNode(r.Rune, convertASTTokenInfo(f, r.Token()))
+}
+
+func convertASTTokenInfo(f *ast.FileNode, tok ast.Token) ast2.TokenInfo {
+	info := f.TokenInfo(tok)
+	return ast2.TokenInfo{
+		PosRange: ast2.PosRange{
+			Start: info.Start(),
+			End:   info.End(),
+		},
+		RawText:           info.RawText(),
+		LeadingWhitespace: info.LeadingWhitespace(),
+		LeadingComments:   convertASTComments(info.LeadingComments()),
+		TrailingComments:  convertASTComments(info.TrailingComments()),
+	}
+}
+
+func convertASTComments(comments ast.Comments) []ast2.Comment {
+	results := make([]ast2.Comment, comments.Len())
+	for i := 0; i < comments.Len(); i++ {
+		cmt := comments.Index(i)
+		results[i] = ast2.Comment{
+			PosRange: ast2.PosRange{
+				Start: cmt.Start(),
+				End:   cmt.End(),
+			},
+			LeadingWhitespace: cmt.LeadingWhitespace(),
+			Text:              cmt.RawText(),
+		}
+	}
+	return results
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go
new file mode 100644
index 00000000..e8902000
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go
@@ -0,0 +1,27 @@
+// Package ast defines types for modeling the AST (Abstract Syntax
+// Tree) for the protocol buffers source language.
+//
+// All nodes of the tree implement the Node interface. Leaf nodes in the
+// tree implement TerminalNode and all others implement CompositeNode.
+// The root of the tree for a proto source file is a *FileNode.
+//
+// Comments are not represented as nodes in the tree. Instead, they are
+// attached to all terminal nodes in the tree. So, when lexing, comments
+// are accumulated until the next non-comment token is found. The AST
+// model in this package thus provides access to all comments in the
+// file, regardless of location (unlike the SourceCodeInfo present in
+// descriptor protos, which is lossy). The comments associated with
+// a non-leaf/non-token node (i.e. a CompositeNode) come from the first
+// and last nodes in its sub-tree.
+//
+// Creation of AST nodes should use the factory functions in this
+// package instead of struct literals. Some factory functions accept
+// optional arguments, which means the arguments can be nil. If nil
+// values are provided for other (non-optional) arguments, the resulting
+// node may be invalid and cause panics later in the program.
+//
+// This package defines numerous interfaces. However, user code should
+// not attempt to implement any of them. Most consumers of an AST will
+// not work correctly if they encounter concrete implementations other
+// than the ones defined in this package.
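+//
+// For example, parsing the declaration:
+//
+//	optional string foo = 1;
+//
+// produces a *FieldNode (a CompositeNode) whose children are the terminal
+// tokens for "optional", "string", "foo", "=", "1", and ";".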
+package ast
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go
new file mode 100644
index 00000000..446a6a01
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go
@@ -0,0 +1,154 @@
+package ast
+
+import "fmt"
+
+// EnumNode represents an enum declaration. Example:
+//
+//	enum Foo { BAR = 0; BAZ = 1 }
+type EnumNode struct {
+	compositeNode
+	Keyword    *KeywordNode
+	Name       *IdentNode
+	OpenBrace  *RuneNode
+	Decls      []EnumElement
+	CloseBrace *RuneNode
+}
+
+func (*EnumNode) fileElement() {}
+func (*EnumNode) msgElement()  {}
+
+// NewEnumNode creates a new *EnumNode. All arguments must be non-nil. While
+// it is technically allowed for decls to be nil or empty, the resulting node
+// will not be a valid enum, which must have at least one value.
+// - keyword: The token corresponding to the "enum" keyword.
+// - name: The token corresponding to the enum's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the enum body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewEnumNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []EnumElement, closeBrace *RuneNode) *EnumNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if openBrace == nil {
+		panic("openBrace is nil")
+	}
+	if closeBrace == nil {
+		panic("closeBrace is nil")
+	}
+	children := make([]Node, 0, 4+len(decls))
+	children = append(children, keyword, name, openBrace)
+	for _, decl := range decls {
+		children = append(children, decl)
+	}
+	children = append(children, closeBrace)
+
+	for _, decl := range decls {
+		switch decl.(type) {
+		case *OptionNode, *EnumValueNode, *ReservedNode, *EmptyDeclNode:
+		default:
+			panic(fmt.Sprintf("invalid EnumElement type: %T", decl))
+		}
+	}
+
+	return &EnumNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Keyword:    keyword,
+		Name:       name,
+		OpenBrace:  openBrace,
+		CloseBrace: closeBrace,
+		Decls:      decls,
+	}
+}
+
+// EnumElement is an interface implemented by all AST nodes that can
+// appear in the body of an enum declaration.
+type EnumElement interface {
+	Node
+	enumElement()
+}
+
+var _ EnumElement = (*OptionNode)(nil)
+var _ EnumElement = (*EnumValueNode)(nil)
+var _ EnumElement = (*ReservedNode)(nil)
+var _ EnumElement = (*EmptyDeclNode)(nil)
+
+// EnumValueDeclNode is a placeholder interface for AST nodes that represent
+// enum values. This allows NoSourceNode to be used in place of *EnumValueNode
+// for some usages.
+type EnumValueDeclNode interface {
+	Node
+	GetName() Node
+	GetNumber() Node
+}
+
+var _ EnumValueDeclNode = (*EnumValueNode)(nil)
+var _ EnumValueDeclNode = NoSourceNode{}
+
+// EnumValueNode represents an enum value declaration. Example:
+//
+//	UNSET = 0 [deprecated = true];
+type EnumValueNode struct {
+	compositeNode
+	Name      *IdentNode
+	Equals    *RuneNode
+	Number    IntValueNode
+	Options   *CompactOptionsNode
+	Semicolon *RuneNode
+}
+
+func (*EnumValueNode) enumElement() {}
+
+// NewEnumValueNode creates a new *EnumValueNode. All arguments must be non-nil
+// except opts which is only non-nil if the declaration included options.
+// - name: The token corresponding to the enum value's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - number: The token corresponding to the enum value's number.
+// - opts: Optional set of enum value options.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, opts *CompactOptionsNode, semicolon *RuneNode) *EnumValueNode { + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if number == nil { + panic("number is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 4 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, name, equals, number) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + return &EnumValueNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Number: number, + Options: opts, + Semicolon: semicolon, + } +} + +func (e *EnumValueNode) GetName() Node { + return e.Name +} + +func (e *EnumValueNode) GetNumber() Node { + return e.Number +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go new file mode 100644 index 00000000..7ec9391b --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go @@ -0,0 +1,659 @@ +package ast + +import "fmt" + +// FieldDeclNode is a node in the AST that defines a field. This includes +// normal message fields as well as extensions. There are multiple types +// of AST nodes that declare fields: +// - *FieldNode +// - *GroupNode +// - *MapFieldNode +// - *SyntheticMapField +// +// This also allows NoSourceNode to be used in place of one of the above +// for some usages. +type FieldDeclNode interface { + Node + FieldLabel() Node + FieldName() Node + FieldType() Node + FieldTag() Node + FieldExtendee() Node + GetGroupKeyword() Node + GetOptions() *CompactOptionsNode +} + +var _ FieldDeclNode = (*FieldNode)(nil) +var _ FieldDeclNode = (*GroupNode)(nil) +var _ FieldDeclNode = (*MapFieldNode)(nil) +var _ FieldDeclNode = (*SyntheticMapField)(nil) +var _ FieldDeclNode = NoSourceNode{} + +// FieldNode represents a normal field declaration (not groups or maps). It +// can represent extension fields as well as non-extension fields (both inside +// of messages and inside of one-ofs). Example: +// +// optional string foo = 1; +type FieldNode struct { + compositeNode + Label FieldLabel + FldType IdentValueNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + Semicolon *RuneNode + + // This is an up-link to the containing *ExtendNode for fields + // that are defined inside of "extend" blocks. + Extendee *ExtendNode +} + +func (*FieldNode) msgElement() {} +func (*FieldNode) oneOfElement() {} +func (*FieldNode) extendElement() {} + +// NewFieldNode creates a new *FieldNode. The label and options arguments may be +// nil but the others must be non-nil. +// - label: The token corresponding to the label keyword if present ("optional", +// "required", or "repeated"). +// - fieldType: The token corresponding to the field's type. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
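+//
+// For instance, the declaration `optional string foo = 1;` corresponds to
+// passing the "optional" keyword as label, the "string" identifier as
+// fieldType, the "foo" identifier as name, the '=' rune as equals, the 1
+// literal as tag, a nil opts, and the ';' rune as semicolon.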
+func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *FieldNode { + if fieldType == nil { + panic("fieldType is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if tag == nil { + panic("tag is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 5 + if label != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if label != nil { + children = append(children, label) + } + children = append(children, fieldType, name, equals, tag) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + + return &FieldNode{ + compositeNode: compositeNode{ + children: children, + }, + Label: newFieldLabel(label), + FldType: fieldType, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + Semicolon: semicolon, + } +} + +func (n *FieldNode) FieldLabel() Node { + // proto3 fields and fields inside one-ofs will not have a label and we need + // this check in order to return a nil node -- otherwise we'd return a + // non-nil node that has a nil pointer value in it :/ + if n.Label.KeywordNode == nil { + return nil + } + return n.Label.KeywordNode +} + +func (n *FieldNode) FieldName() Node { + return n.Name +} + +func (n *FieldNode) FieldType() Node { + return n.FldType +} + +func (n *FieldNode) FieldTag() Node { + return n.Tag +} + +func (n *FieldNode) FieldExtendee() Node { + if n.Extendee != nil { + return n.Extendee.Extendee + } + return nil +} + +func (n *FieldNode) GetGroupKeyword() Node { + return nil +} + +func (n *FieldNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +// FieldLabel represents the label of a field, which indicates its cardinality +// (i.e. whether it is optional, required, or repeated). +type FieldLabel struct { + *KeywordNode + Repeated bool + Required bool +} + +func newFieldLabel(lbl *KeywordNode) FieldLabel { + repeated, required := false, false + if lbl != nil { + repeated = lbl.Val == "repeated" + required = lbl.Val == "required" + } + return FieldLabel{ + KeywordNode: lbl, + Repeated: repeated, + Required: required, + } +} + +// IsPresent returns true if a label keyword was present in the declaration +// and false if it was absent. +func (f *FieldLabel) IsPresent() bool { + return f.KeywordNode != nil +} + +// GroupNode represents a group declaration, which doubles as a field and inline +// message declaration. It can represent extension fields as well as +// non-extension fields (both inside of messages and inside of one-ofs). +// Example: +// +// optional group Key = 4 { +// optional uint64 id = 1; +// optional string name = 2; +// } +type GroupNode struct { + compositeNode + Label FieldLabel + Keyword *KeywordNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + MessageBody + + // This is an up-link to the containing *ExtendNode for groups + // that are defined inside of "extend" blocks. + Extendee *ExtendNode +} + +func (*GroupNode) msgElement() {} +func (*GroupNode) oneOfElement() {} +func (*GroupNode) extendElement() {} + +// NewGroupNode creates a new *GroupNode. The label and options arguments may be +// nil but the others must be non-nil. +// - label: The token corresponding to the label keyword if present ("optional", +// "required", or "repeated"). 
+// - keyword: The token corresponding to the "group" keyword.
+// - name: The token corresponding to the field's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - tag: The token corresponding to the field's tag number.
+// - opts: Optional set of field options.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the group body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *GroupNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if equals == nil {
+		panic("equals is nil")
+	}
+	if tag == nil {
+		panic("tag is nil")
+	}
+	if openBrace == nil {
+		panic("openBrace is nil")
+	}
+	if closeBrace == nil {
+		panic("closeBrace is nil")
+	}
+	numChildren := 6 + len(decls)
+	if label != nil {
+		numChildren++
+	}
+	if opts != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	if label != nil {
+		children = append(children, label)
+	}
+	children = append(children, keyword, name, equals, tag)
+	if opts != nil {
+		children = append(children, opts)
+	}
+	children = append(children, openBrace)
+	for _, decl := range decls {
+		children = append(children, decl)
+	}
+	children = append(children, closeBrace)
+
+	ret := &GroupNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Label:   newFieldLabel(label),
+		Keyword: keyword,
+		Name:    name,
+		Equals:  equals,
+		Tag:     tag,
+		Options: opts,
+	}
+	populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace)
+	return ret
+}
+
+func (n *GroupNode) FieldLabel() Node {
+	if n.Label.KeywordNode == nil {
+		// return nil interface to indicate absence, not a typed nil
+		return nil
+	}
+	return n.Label.KeywordNode
+}
+
+func (n *GroupNode) FieldName() Node {
+	return n.Name
+}
+
+func (n *GroupNode) FieldType() Node {
+	return n.Keyword
+}
+
+func (n *GroupNode) FieldTag() Node {
+	return n.Tag
+}
+
+func (n *GroupNode) FieldExtendee() Node {
+	if n.Extendee != nil {
+		return n.Extendee.Extendee
+	}
+	return nil
+}
+
+func (n *GroupNode) GetGroupKeyword() Node {
+	return n.Keyword
+}
+
+func (n *GroupNode) GetOptions() *CompactOptionsNode {
+	return n.Options
+}
+
+func (n *GroupNode) MessageName() Node {
+	return n.Name
+}
+
+// OneOfDeclNode is a node in the AST that defines a oneof. There are
+// multiple types of AST nodes that declare oneofs:
+// - *OneOfNode
+// - *SyntheticOneOf
+//
+// This also allows NoSourceNode to be used in place of one of the above
+// for some usages.
+type OneOfDeclNode interface {
+	Node
+	OneOfName() Node
+}
+
+// OneOfNode represents a one-of declaration. Example:
+//
+//	oneof query {
+//		string by_name = 2;
+//		Type by_type = 3;
+//		Address by_address = 4;
+//		Labels by_label = 5;
+//	}
+type OneOfNode struct {
+	compositeNode
+	Keyword    *KeywordNode
+	Name       *IdentNode
+	OpenBrace  *RuneNode
+	Decls      []OneOfElement
+	CloseBrace *RuneNode
+}
+
+func (*OneOfNode) msgElement() {}
+
+// NewOneOfNode creates a new *OneOfNode. All arguments must be non-nil. While
+// it is technically allowed for decls to be nil or empty, the resulting node
+// will not be a valid oneof, which must have at least one field.
+// - keyword: The token corresponding to the "oneof" keyword.
+// - name: The token corresponding to the oneof's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the oneof body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewOneOfNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneOfElement, closeBrace *RuneNode) *OneOfNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if openBrace == nil {
+		panic("openBrace is nil")
+	}
+	if closeBrace == nil {
+		panic("closeBrace is nil")
+	}
+	children := make([]Node, 0, 4+len(decls))
+	children = append(children, keyword, name, openBrace)
+	for _, decl := range decls {
+		children = append(children, decl)
+	}
+	children = append(children, closeBrace)
+
+	for _, decl := range decls {
+		switch decl := decl.(type) {
+		case *OptionNode, *FieldNode, *GroupNode, *EmptyDeclNode:
+		default:
+			panic(fmt.Sprintf("invalid OneOfElement type: %T", decl))
+		}
+	}
+
+	return &OneOfNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Keyword:    keyword,
+		Name:       name,
+		OpenBrace:  openBrace,
+		Decls:      decls,
+		CloseBrace: closeBrace,
+	}
+}
+
+func (n *OneOfNode) OneOfName() Node {
+	return n.Name
+}
+
+// SyntheticOneOf is not an actual node in the AST but a synthetic node
+// that implements OneOfDeclNode. These are used to represent the implicit
+// oneof declarations that enclose "proto3 optional" fields.
+type SyntheticOneOf struct {
+	Field *FieldNode
+}
+
+// NewSyntheticOneOf creates a new *SyntheticOneOf that corresponds to the
+// given proto3 optional field.
+func NewSyntheticOneOf(field *FieldNode) *SyntheticOneOf {
+	return &SyntheticOneOf{Field: field}
+}
+
+func (n *SyntheticOneOf) Start() *SourcePos {
+	return n.Field.Start()
+}
+
+func (n *SyntheticOneOf) End() *SourcePos {
+	return n.Field.End()
+}
+
+func (n *SyntheticOneOf) LeadingComments() []Comment {
+	return nil
+}
+
+func (n *SyntheticOneOf) TrailingComments() []Comment {
+	return nil
+}
+
+func (n *SyntheticOneOf) OneOfName() Node {
+	return n.Field.FieldName()
+}
+
+// OneOfElement is an interface implemented by all AST nodes that can
+// appear in the body of a oneof declaration.
+type OneOfElement interface {
+	Node
+	oneOfElement()
+}
+
+var _ OneOfElement = (*OptionNode)(nil)
+var _ OneOfElement = (*FieldNode)(nil)
+var _ OneOfElement = (*GroupNode)(nil)
+var _ OneOfElement = (*EmptyDeclNode)(nil)
+
+// MapTypeNode represents the type declaration for a map field. It defines
+// both the key and value types for the map. Example:
+//
+//	map<string, Values>
+type MapTypeNode struct {
+	compositeNode
+	Keyword    *KeywordNode
+	OpenAngle  *RuneNode
+	KeyType    *IdentNode
+	Comma      *RuneNode
+	ValueType  IdentValueNode
+	CloseAngle *RuneNode
+}
+
+// NewMapTypeNode creates a new *MapTypeNode. All arguments must be non-nil.
+// - keyword: The token corresponding to the "map" keyword.
+// - openAngle: The token corresponding to the "<" rune after the keyword.
+// - keyType: The token corresponding to the key type for the map.
+// - comma: The token corresponding to the "," rune between key and value types.
+// - valType: The token corresponding to the value type for the map.
+// - closeAngle: The token corresponding to the ">" rune that ends the declaration.
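+//
+// An illustrative sketch (hypothetical tokens) for the type `map<string, Values>`:
+//
+//	mt := NewMapTypeNode(mapKw, openAngle, stringIdent, commaRune, valuesIdent, closeAngle)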
+func NewMapTypeNode(keyword *KeywordNode, openAngle *RuneNode, keyType *IdentNode, comma *RuneNode, valType IdentValueNode, closeAngle *RuneNode) *MapTypeNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if openAngle == nil {
+		panic("openAngle is nil")
+	}
+	if keyType == nil {
+		panic("keyType is nil")
+	}
+	if comma == nil {
+		panic("comma is nil")
+	}
+	if valType == nil {
+		panic("valType is nil")
+	}
+	if closeAngle == nil {
+		panic("closeAngle is nil")
+	}
+	children := []Node{keyword, openAngle, keyType, comma, valType, closeAngle}
+	return &MapTypeNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Keyword:    keyword,
+		OpenAngle:  openAngle,
+		KeyType:    keyType,
+		Comma:      comma,
+		ValueType:  valType,
+		CloseAngle: closeAngle,
+	}
+}
+
+// MapFieldNode represents a map field declaration. Example:
+//
+//	map<string, Values> replacements = 3 [deprecated = true];
+type MapFieldNode struct {
+	compositeNode
+	MapType   *MapTypeNode
+	Name      *IdentNode
+	Equals    *RuneNode
+	Tag       *UintLiteralNode
+	Options   *CompactOptionsNode
+	Semicolon *RuneNode
+}
+
+func (*MapFieldNode) msgElement() {}
+
+// NewMapFieldNode creates a new *MapFieldNode. All arguments must be non-nil
+// except opts, which may be nil.
+// - mapType: The token corresponding to the map type.
+// - name: The token corresponding to the field's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - tag: The token corresponding to the field's tag number.
+// - opts: Optional set of field options.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewMapFieldNode(mapType *MapTypeNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *MapFieldNode {
+	if mapType == nil {
+		panic("mapType is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if equals == nil {
+		panic("equals is nil")
+	}
+	if tag == nil {
+		panic("tag is nil")
+	}
+	if semicolon == nil {
+		panic("semicolon is nil")
+	}
+	numChildren := 5
+	if opts != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, mapType, name, equals, tag)
+	if opts != nil {
+		children = append(children, opts)
+	}
+	children = append(children, semicolon)
+
+	return &MapFieldNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		MapType:   mapType,
+		Name:      name,
+		Equals:    equals,
+		Tag:       tag,
+		Options:   opts,
+		Semicolon: semicolon,
+	}
+}
+
+func (n *MapFieldNode) FieldLabel() Node {
+	return nil
+}
+
+func (n *MapFieldNode) FieldName() Node {
+	return n.Name
+}
+
+func (n *MapFieldNode) FieldType() Node {
+	return n.MapType
+}
+
+func (n *MapFieldNode) FieldTag() Node {
+	return n.Tag
+}
+
+func (n *MapFieldNode) FieldExtendee() Node {
+	return nil
+}
+
+func (n *MapFieldNode) GetGroupKeyword() Node {
+	return nil
+}
+
+func (n *MapFieldNode) GetOptions() *CompactOptionsNode {
+	return n.Options
+}
+
+func (n *MapFieldNode) MessageName() Node {
+	return n.Name
+}
+
+func (n *MapFieldNode) KeyField() *SyntheticMapField {
+	return NewSyntheticMapField(n.MapType.KeyType, 1)
+}
+
+func (n *MapFieldNode) ValueField() *SyntheticMapField {
+	return NewSyntheticMapField(n.MapType.ValueType, 2)
+}
+
+// SyntheticMapField is not an actual node in the AST but a synthetic node
+// that implements FieldDeclNode. These are used to represent the implicit
+// field declarations of the "key" and "value" fields in a map entry.
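+//
+// For example, given a parsed `map<string, Values> replacements = 3;` field,
+// the two synthetic fields can be obtained as follows (mapField being a
+// hypothetical *MapFieldNode):
+//
+//	key := mapField.KeyField()   // string key field, tag 1
+//	val := mapField.ValueField() // Values value field, tag 2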
+type SyntheticMapField struct { + Ident IdentValueNode + Tag *UintLiteralNode +} + +// NewSyntheticMapField creates a new *SyntheticMapField for the given +// identifier (either a key or value type in a map declaration) and tag +// number (1 for key, 2 for value). +func NewSyntheticMapField(ident IdentValueNode, tagNum uint64) *SyntheticMapField { + tag := &UintLiteralNode{ + terminalNode: terminalNode{ + posRange: PosRange{Start: *ident.Start(), End: *ident.End()}, + }, + Val: tagNum, + } + return &SyntheticMapField{Ident: ident, Tag: tag} +} + +func (n *SyntheticMapField) Start() *SourcePos { + return n.Ident.Start() +} + +func (n *SyntheticMapField) End() *SourcePos { + return n.Ident.End() +} + +func (n *SyntheticMapField) LeadingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) TrailingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) FieldLabel() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldName() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldType() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldTag() Node { + return n.Tag +} + +func (n *SyntheticMapField) FieldExtendee() Node { + return nil +} + +func (n *SyntheticMapField) GetGroupKeyword() Node { + return nil +} + +func (n *SyntheticMapField) GetOptions() *CompactOptionsNode { + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go new file mode 100644 index 00000000..2cef3c6c --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go @@ -0,0 +1,234 @@ +package ast + +import "fmt" + +// FileDeclNode is a placeholder interface for AST nodes that represent files. +// This allows NoSourceNode to be used in place of *FileNode for some usages. +type FileDeclNode interface { + Node + GetSyntax() Node +} + +var _ FileDeclNode = (*FileNode)(nil) +var _ FileDeclNode = NoSourceNode{} + +// FileNode is the root of the AST hierarchy. It represents an entire +// protobuf source file. +type FileNode struct { + compositeNode + Syntax *SyntaxNode // nil if file has no syntax declaration + Decls []FileElement + + // Any comments that follow the last token in the file. + FinalComments []Comment + // Any whitespace at the end of the file (after the last token or + // last comment in the file). + FinalWhitespace string +} + +// NewFileNode creates a new *FileNode. The syntax parameter is optional. If it +// is absent, it means the file had no syntax declaration. +// +// This function panics if the concrete type of any element of decls is not +// from this package. 
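+//
+// An illustrative sketch (hypothetical nodes) assembling a file that declares
+// a syntax statement followed by a package and a message:
+//
+//	f := NewFileNode(syntaxNode, []FileElement{packageNode, messageNode})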
+func NewFileNode(syntax *SyntaxNode, decls []FileElement) *FileNode { + numChildren := len(decls) + if syntax != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if syntax != nil { + children = append(children, syntax) + } + for _, decl := range decls { + children = append(children, decl) + } + + for _, decl := range decls { + switch decl := decl.(type) { + case *PackageNode, *ImportNode, *OptionNode, *MessageNode, + *EnumNode, *ExtendNode, *ServiceNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid FileElement type: %T", decl)) + } + } + + return &FileNode{ + compositeNode: compositeNode{ + children: children, + }, + Syntax: syntax, + Decls: decls, + } +} + +func NewEmptyFileNode(filename string) *FileNode { + return &FileNode{ + compositeNode: compositeNode{ + children: []Node{NewNoSourceNode(filename)}, + }, + } +} + +func (f *FileNode) GetSyntax() Node { + return f.Syntax +} + +// FileElement is an interface implemented by all AST nodes that are +// allowed as top-level declarations in the file. +type FileElement interface { + Node + fileElement() +} + +var _ FileElement = (*ImportNode)(nil) +var _ FileElement = (*PackageNode)(nil) +var _ FileElement = (*OptionNode)(nil) +var _ FileElement = (*MessageNode)(nil) +var _ FileElement = (*EnumNode)(nil) +var _ FileElement = (*ExtendNode)(nil) +var _ FileElement = (*ServiceNode)(nil) +var _ FileElement = (*EmptyDeclNode)(nil) + +// SyntaxNode represents a syntax declaration, which if present must be +// the first non-comment content. Example: +// +// syntax = "proto2"; +// +// Files that don't have a syntax node are assumed to use proto2 syntax. +type SyntaxNode struct { + compositeNode + Keyword *KeywordNode + Equals *RuneNode + Syntax StringValueNode + Semicolon *RuneNode +} + +// NewSyntaxNode creates a new *SyntaxNode. All four arguments must be non-nil: +// - keyword: The token corresponding to the "syntax" keyword. +// - equals: The token corresponding to the "=" rune. +// - syntax: The actual syntax value, e.g. "proto2" or "proto3". +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNode, semicolon *RuneNode) *SyntaxNode { + if keyword == nil { + panic("keyword is nil") + } + if equals == nil { + panic("equals is nil") + } + if syntax == nil { + panic("syntax is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, equals, syntax, semicolon} + return &SyntaxNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Equals: equals, + Syntax: syntax, + Semicolon: semicolon, + } +} + +// ImportNode represents an import statement. Example: +// +// import "google/protobuf/empty.proto"; +type ImportNode struct { + compositeNode + Keyword *KeywordNode + // Optional; if present indicates this is a public import + Public *KeywordNode + // Optional; if present indicates this is a weak import + Weak *KeywordNode + Name StringValueNode + Semicolon *RuneNode +} + +// NewImportNode creates a new *ImportNode. The public and weak arguments are optional +// and only one or the other (or neither) may be specified, not both. When public is +// non-nil, it indicates the "public" keyword in the import statement and means this is +// a public import. When weak is non-nil, it indicates the "weak" keyword in the import +// statement and means this is a weak import. When both are nil, this is a normal import. 
+// The other arguments must be non-nil: +// - keyword: The token corresponding to the "import" keyword. +// - public: The token corresponding to the optional "public" keyword. +// - weak: The token corresponding to the optional "weak" keyword. +// - name: The actual imported file name. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, name StringValueNode, semicolon *RuneNode) *ImportNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 3 + if public != nil || weak != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + if public != nil { + children = append(children, public) + } else if weak != nil { + children = append(children, weak) + } + children = append(children, name, semicolon) + + return &ImportNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Public: public, + Weak: weak, + Name: name, + Semicolon: semicolon, + } +} + +func (*ImportNode) fileElement() {} + +// PackageNode represents a package declaration. Example: +// +// package foobar.com; +type PackageNode struct { + compositeNode + Keyword *KeywordNode + Name IdentValueNode + Semicolon *RuneNode +} + +func (*PackageNode) fileElement() {} + +// NewPackageNode creates a new *PackageNode. All three arguments must be non-nil: +// - keyword: The token corresponding to the "package" keyword. +// - name: The package name declared for the file. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewPackageNode(keyword *KeywordNode, name IdentValueNode, semicolon *RuneNode) *PackageNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, semicolon} + return &PackageNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go new file mode 100644 index 00000000..ed97e973 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go @@ -0,0 +1,134 @@ +package ast + +import ( + "fmt" + "strings" +) + +// Identifier is a possibly-qualified name. This is used to distinguish +// ValueNode values that are references/identifiers vs. those that are +// string literals. +type Identifier string + +// IdentValueNode is an AST node that represents an identifier. +type IdentValueNode interface { + ValueNode + AsIdentifier() Identifier +} + +var _ IdentValueNode = (*IdentNode)(nil) +var _ IdentValueNode = (*CompoundIdentNode)(nil) + +// IdentNode represents a simple, unqualified identifier. These are used to name +// elements declared in a protobuf file or to refer to elements. Example: +// +// foobar +type IdentNode struct { + terminalNode + Val string +} + +// NewIdentNode creates a new *IdentNode. The given val is the identifier text. 
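+//
+// For example, a lexer that has just read the identifier "foobar" might call
+// (tokenInfo being the hypothetical accumulated TokenInfo):
+//
+//	ident := NewIdentNode("foobar", tokenInfo)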
+func NewIdentNode(val string, info TokenInfo) *IdentNode { + return &IdentNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} + +func (n *IdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *IdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// ToKeyword is used to convert identifiers to keywords. Since keywords are not +// reserved in the protobuf language, they are initially lexed as identifiers +// and then converted to keywords based on context. +func (n *IdentNode) ToKeyword() *KeywordNode { + return (*KeywordNode)(n) +} + +// CompoundIdentNode represents a qualified identifier. A qualified identifier +// has at least one dot and possibly multiple identifier names (all separated by +// dots). If the identifier has a leading dot, then it is a *fully* qualified +// identifier. Example: +// +// .com.foobar.Baz +type CompoundIdentNode struct { + compositeNode + // Optional leading dot, indicating that the identifier is fully qualified. + LeadingDot *RuneNode + Components []*IdentNode + // Dots[0] is the dot after Components[0]. The length of Dots is always + // one less than the length of Components. + Dots []*RuneNode + // The text value of the identifier, with all components and dots + // concatenated. + Val string +} + +// NewCompoundIdentNode creates a *CompoundIdentNode. The leadingDot may be nil. +// The dots arg must have a length that is one less than the length of +// components. The components arg must not be empty. +func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots []*RuneNode) *CompoundIdentNode { + if len(components) == 0 { + panic("must have at least one component") + } + if len(dots) != len(components)-1 { + panic(fmt.Sprintf("%d components requires %d dots, not %d", len(components), len(components)-1, len(dots))) + } + numChildren := len(components)*2 - 1 + if leadingDot != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + var b strings.Builder + if leadingDot != nil { + children = append(children, leadingDot) + b.WriteRune(leadingDot.Rune) + } + for i, comp := range components { + if i > 0 { + dot := dots[i-1] + children = append(children, dot) + b.WriteRune(dot.Rune) + } + children = append(children, comp) + b.WriteString(comp.Val) + } + return &CompoundIdentNode{ + compositeNode: compositeNode{ + children: children, + }, + LeadingDot: leadingDot, + Components: components, + Dots: dots, + Val: b.String(), + } +} + +func (n *CompoundIdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *CompoundIdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// KeywordNode is an AST node that represents a keyword. Keywords are +// like identifiers, but they have special meaning in particular contexts. +// Example: +// +// message +type KeywordNode IdentNode + +// NewKeywordNode creates a new *KeywordNode. The given val is the keyword. +func NewKeywordNode(val string, info TokenInfo) *KeywordNode { + return &KeywordNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go new file mode 100644 index 00000000..c98b0f81 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go @@ -0,0 +1,199 @@ +package ast + +import "fmt" + +// MessageDeclNode is a node in the AST that defines a message type. 
This
+// includes normal message declarations as well as implicit messages:
+// - *MessageNode
+// - *GroupNode (the group is a field and inline message type)
+// - *MapFieldNode (map fields implicitly define a MapEntry message type)
+//
+// This also allows NoSourceNode to be used in place of one of the above
+// for some usages.
+type MessageDeclNode interface {
+	Node
+	MessageName() Node
+}
+
+var _ MessageDeclNode = (*MessageNode)(nil)
+var _ MessageDeclNode = (*GroupNode)(nil)
+var _ MessageDeclNode = (*MapFieldNode)(nil)
+var _ MessageDeclNode = NoSourceNode{}
+
+// MessageNode represents a message declaration. Example:
+//
+//	message Foo {
+//		string name = 1;
+//		repeated string labels = 2;
+//		bytes extra = 3;
+//	}
+type MessageNode struct {
+	compositeNode
+	Keyword *KeywordNode
+	Name    *IdentNode
+	MessageBody
+}
+
+func (*MessageNode) fileElement() {}
+func (*MessageNode) msgElement()  {}
+
+// NewMessageNode creates a new *MessageNode. All arguments must be non-nil.
+// - keyword: The token corresponding to the "message" keyword.
+// - name: The token corresponding to the message's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the message body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewMessageNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *MessageNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if openBrace == nil {
+		panic("openBrace is nil")
+	}
+	if closeBrace == nil {
+		panic("closeBrace is nil")
+	}
+	children := make([]Node, 0, 4+len(decls))
+	children = append(children, keyword, name, openBrace)
+	for _, decl := range decls {
+		children = append(children, decl)
+	}
+	children = append(children, closeBrace)
+
+	ret := &MessageNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Keyword: keyword,
+		Name:    name,
+	}
+	populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace)
+	return ret
+}
+
+func (n *MessageNode) MessageName() Node {
+	return n.Name
+}
+
+// MessageBody represents the body of a message. It is used by both
+// MessageNodes and GroupNodes.
+type MessageBody struct {
+	OpenBrace  *RuneNode
+	Decls      []MessageElement
+	CloseBrace *RuneNode
+}
+
+func populateMessageBody(m *MessageBody, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) {
+	m.OpenBrace = openBrace
+	m.Decls = decls
+	for _, decl := range decls {
+		switch decl.(type) {
+		case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneOfNode,
+			*MessageNode, *EnumNode, *ExtendNode, *ExtensionRangeNode,
+			*ReservedNode, *EmptyDeclNode:
+		default:
+			panic(fmt.Sprintf("invalid MessageElement type: %T", decl))
+		}
+	}
+	m.CloseBrace = closeBrace
+}
+
+// MessageElement is an interface implemented by all AST nodes that can
+// appear in a message body.
+type MessageElement interface { + Node + msgElement() +} + +var _ MessageElement = (*OptionNode)(nil) +var _ MessageElement = (*FieldNode)(nil) +var _ MessageElement = (*MapFieldNode)(nil) +var _ MessageElement = (*OneOfNode)(nil) +var _ MessageElement = (*GroupNode)(nil) +var _ MessageElement = (*MessageNode)(nil) +var _ MessageElement = (*EnumNode)(nil) +var _ MessageElement = (*ExtendNode)(nil) +var _ MessageElement = (*ExtensionRangeNode)(nil) +var _ MessageElement = (*ReservedNode)(nil) +var _ MessageElement = (*EmptyDeclNode)(nil) + +// ExtendNode represents a declaration of extension fields. Example: +// +// extend google.protobuf.FieldOptions { +// bool redacted = 33333; +// } +type ExtendNode struct { + compositeNode + Keyword *KeywordNode + Extendee IdentValueNode + OpenBrace *RuneNode + Decls []ExtendElement + CloseBrace *RuneNode +} + +func (*ExtendNode) fileElement() {} +func (*ExtendNode) msgElement() {} + +// NewExtendNode creates a new *ExtendNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "extend" keyword. +// - extendee: The token corresponding to the name of the extended message. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the message body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewExtendNode(keyword *KeywordNode, extendee IdentValueNode, openBrace *RuneNode, decls []ExtendElement, closeBrace *RuneNode) *ExtendNode { + if keyword == nil { + panic("keyword is nil") + } + if extendee == nil { + panic("extendee is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, extendee, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &ExtendNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Extendee: extendee, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } + for _, decl := range decls { + switch decl := decl.(type) { + case *FieldNode: + decl.Extendee = ret + case *GroupNode: + decl.Extendee = ret + case *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ExtendElement type: %T", decl)) + } + } + return ret +} + +// ExtendElement is an interface implemented by all AST nodes that can +// appear in the body of an extends declaration. +type ExtendElement interface { + Node + extendElement() +} + +var _ ExtendElement = (*FieldNode)(nil) +var _ ExtendElement = (*GroupNode)(nil) +var _ ExtendElement = (*EmptyDeclNode)(nil) diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go new file mode 100644 index 00000000..44e02b10 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go @@ -0,0 +1,103 @@ +package ast + +// UnknownPos is a placeholder position when only the source file +// name is known. +func UnknownPos(filename string) *SourcePos { + return &SourcePos{Filename: filename} +} + +// NoSourceNode is a placeholder AST node that implements numerous +// interfaces in this package. It can be used to represent an AST +// element for a file whose source is not available. +type NoSourceNode struct { + pos *SourcePos +} + +// NewNoSourceNode creates a new NoSourceNode for the given filename. 
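+//
+// For example, to produce placeholder positions for a file whose source is
+// unavailable:
+//
+//	n := NewNoSourceNode("foo.proto")
+//	pos := n.Start() // a position that carries only the filename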
+func NewNoSourceNode(filename string) NoSourceNode { + return NoSourceNode{pos: UnknownPos(filename)} +} + +func (n NoSourceNode) Start() *SourcePos { + return n.pos +} + +func (n NoSourceNode) End() *SourcePos { + return n.pos +} + +func (n NoSourceNode) LeadingComments() []Comment { + return nil +} + +func (n NoSourceNode) TrailingComments() []Comment { + return nil +} + +func (n NoSourceNode) GetSyntax() Node { + return n +} + +func (n NoSourceNode) GetName() Node { + return n +} + +func (n NoSourceNode) GetValue() ValueNode { + return n +} + +func (n NoSourceNode) FieldLabel() Node { + return n +} + +func (n NoSourceNode) FieldName() Node { + return n +} + +func (n NoSourceNode) FieldType() Node { + return n +} + +func (n NoSourceNode) FieldTag() Node { + return n +} + +func (n NoSourceNode) FieldExtendee() Node { + return n +} + +func (n NoSourceNode) GetGroupKeyword() Node { + return n +} + +func (n NoSourceNode) GetOptions() *CompactOptionsNode { + return nil +} + +func (n NoSourceNode) RangeStart() Node { + return n +} + +func (n NoSourceNode) RangeEnd() Node { + return n +} + +func (n NoSourceNode) GetNumber() Node { + return n +} + +func (n NoSourceNode) MessageName() Node { + return n +} + +func (n NoSourceNode) GetInputType() Node { + return n +} + +func (n NoSourceNode) GetOutputType() Node { + return n +} + +func (n NoSourceNode) Value() interface{} { + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go new file mode 100644 index 00000000..a2a8a3b2 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go @@ -0,0 +1,200 @@ +package ast + +// Node is the interface implemented by all nodes in the AST. It +// provides information about the span of this AST node in terms +// of location in the source file. It also provides information +// about all prior comments (attached as leading comments) and +// optional subsequent comments (attached as trailing comments). +type Node interface { + Start() *SourcePos + End() *SourcePos + LeadingComments() []Comment + TrailingComments() []Comment +} + +// TerminalNode represents a leaf in the AST. These represent +// the tokens/lexemes in the protobuf language. Comments and +// whitespace are accumulated by the lexer and associated with +// the following lexed token. +type TerminalNode interface { + Node + // PopLeadingComment removes the first leading comment from this + // token and returns it. If the node has no leading comments then + // this method will panic. + PopLeadingComment() Comment + // PushTrailingComment appends the given comment to the token's + // trailing comments. + PushTrailingComment(Comment) + // LeadingWhitespace returns any whitespace between the prior comment + // (last leading comment), if any, or prior lexed token and this token. + LeadingWhitespace() string + // RawText returns the raw text of the token as read from the source. + RawText() string +} + +var _ TerminalNode = (*StringLiteralNode)(nil) +var _ TerminalNode = (*UintLiteralNode)(nil) +var _ TerminalNode = (*FloatLiteralNode)(nil) +var _ TerminalNode = (*IdentNode)(nil) +var _ TerminalNode = (*BoolLiteralNode)(nil) +var _ TerminalNode = (*SpecialFloatLiteralNode)(nil) +var _ TerminalNode = (*KeywordNode)(nil) +var _ TerminalNode = (*RuneNode)(nil) + +// TokenInfo represents state accumulated by the lexer to associated with a +// token (aka terminal node). 
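+//
+// An illustrative sketch of how a lexer might populate one of these for a
+// keyword token (rng and comments are hypothetical values):
+//
+//	info := TokenInfo{PosRange: rng, RawText: "message", LeadingWhitespace: " ", LeadingComments: comments}
+//	kw := NewKeywordNode("message", info)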
+type TokenInfo struct { + // The location of the token in the source file. + PosRange + // The raw text of the token. + RawText string + // Any comments encountered preceding this token. + LeadingComments []Comment + // Any leading whitespace immediately preceding this token. + LeadingWhitespace string + // Any trailing comments following this token. This is usually + // empty as tokens are created by the lexer immediately and + // trailing comments are accounted for afterwards, added using + // the node's PushTrailingComment method. + TrailingComments []Comment +} + +func (t *TokenInfo) asTerminalNode() terminalNode { + return terminalNode{ + posRange: t.PosRange, + leadingComments: t.LeadingComments, + leadingWhitespace: t.LeadingWhitespace, + trailingComments: t.TrailingComments, + raw: t.RawText, + } +} + +// CompositeNode represents any non-terminal node in the tree. These +// are interior or root nodes and have child nodes. +type CompositeNode interface { + Node + // All AST nodes that are immediate children of this one. + Children() []Node +} + +// terminalNode contains book-keeping shared by all TerminalNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the TerminalNode +// interface. +type terminalNode struct { + posRange PosRange + leadingComments []Comment + leadingWhitespace string + trailingComments []Comment + raw string +} + +func (n *terminalNode) Start() *SourcePos { + return &n.posRange.Start +} + +func (n *terminalNode) End() *SourcePos { + return &n.posRange.End +} + +func (n *terminalNode) LeadingComments() []Comment { + return n.leadingComments +} + +func (n *terminalNode) TrailingComments() []Comment { + return n.trailingComments +} + +func (n *terminalNode) PopLeadingComment() Comment { + c := n.leadingComments[0] + n.leadingComments = n.leadingComments[1:] + return c +} + +func (n *terminalNode) PushTrailingComment(c Comment) { + n.trailingComments = append(n.trailingComments, c) +} + +func (n *terminalNode) LeadingWhitespace() string { + return n.leadingWhitespace +} + +func (n *terminalNode) RawText() string { + return n.raw +} + +// compositeNode contains book-keeping shared by all CompositeNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the CompositeNode +// interface. +type compositeNode struct { + children []Node +} + +func (n *compositeNode) Children() []Node { + return n.children +} + +func (n *compositeNode) Start() *SourcePos { + return n.children[0].Start() +} + +func (n *compositeNode) End() *SourcePos { + return n.children[len(n.children)-1].End() +} + +func (n *compositeNode) LeadingComments() []Comment { + return n.children[0].LeadingComments() +} + +func (n *compositeNode) TrailingComments() []Comment { + return n.children[len(n.children)-1].TrailingComments() +} + +// RuneNode represents a single rune in protobuf source. Runes +// are typically collected into tokens, but some runes stand on +// their own, such as punctuation/symbols like commas, semicolons, +// equals signs, open and close symbols (braces, brackets, angles, +// and parentheses), and periods/dots. +type RuneNode struct { + terminalNode + Rune rune +} + +// NewRuneNode creates a new *RuneNode with the given properties. +func NewRuneNode(r rune, info TokenInfo) *RuneNode { + return &RuneNode{ + terminalNode: info.asTerminalNode(), + Rune: r, + } +} + +// EmptyDeclNode represents an empty declaration in protobuf source. 
+// These amount to extra semicolons, with no actual content preceding +// the semicolon. +type EmptyDeclNode struct { + compositeNode + Semicolon *RuneNode +} + +// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must +// be non-nil. +func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode { + if semicolon == nil { + panic("semicolon is nil") + } + return &EmptyDeclNode{ + compositeNode: compositeNode{ + children: []Node{semicolon}, + }, + Semicolon: semicolon, + } +} + +func (e *EmptyDeclNode) fileElement() {} +func (e *EmptyDeclNode) msgElement() {} +func (e *EmptyDeclNode) extendElement() {} +func (e *EmptyDeclNode) oneOfElement() {} +func (e *EmptyDeclNode) enumElement() {} +func (e *EmptyDeclNode) serviceElement() {} +func (e *EmptyDeclNode) methodElement() {} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go new file mode 100644 index 00000000..c4ed169c --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go @@ -0,0 +1,361 @@ +package ast + +import "fmt" + +// OptionDeclNode is a placeholder interface for AST nodes that represent +// options. This allows NoSourceNode to be used in place of *OptionNode +// for some usages. +type OptionDeclNode interface { + Node + GetName() Node + GetValue() ValueNode +} + +var _ OptionDeclNode = (*OptionNode)(nil) +var _ OptionDeclNode = NoSourceNode{} + +// OptionNode represents the declaration of a single option for an element. +// It is used both for normal option declarations (start with "option" keyword +// and end with semicolon) and for compact options found in fields, enum values, +// and extension ranges. Example: +// +// option (custom.option) = "foo"; +type OptionNode struct { + compositeNode + Keyword *KeywordNode // absent for compact options + Name *OptionNameNode + Equals *RuneNode + Val ValueNode + Semicolon *RuneNode // absent for compact options +} + +func (e *OptionNode) fileElement() {} +func (e *OptionNode) msgElement() {} +func (e *OptionNode) oneOfElement() {} +func (e *OptionNode) enumElement() {} +func (e *OptionNode) serviceElement() {} +func (e *OptionNode) methodElement() {} + +// NewOptionNode creates a new *OptionNode for a full option declaration (as +// used in files, messages, oneofs, enums, services, and methods). All arguments +// must be non-nil. (Also see NewCompactOptionNode.) +// - keyword: The token corresponding to the "option" keyword. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewOptionNode(keyword *KeywordNode, name *OptionNameNode, equals *RuneNode, val ValueNode, semicolon *RuneNode) *OptionNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if val == nil { + panic("val is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, equals, val, semicolon} + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Equals: equals, + Val: val, + Semicolon: semicolon, + } +} + +// NewCompactOptionNode creates a new *OptionNode for a full compact declaration +// (as used in fields, enum values, and extension ranges). 
All arguments must be +// non-nil. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +func NewCompactOptionNode(name *OptionNameNode, equals *RuneNode, val ValueNode) *OptionNode { + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if val == nil { + panic("val is nil") + } + children := []Node{name, equals, val} + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Val: val, + } +} + +func (n *OptionNode) GetName() Node { + return n.Name +} + +func (n *OptionNode) GetValue() ValueNode { + return n.Val +} + +// OptionNameNode represents an option name or even a traversal through message +// types to name a nested option field. Example: +// +// (foo.bar).baz.(bob) +type OptionNameNode struct { + compositeNode + Parts []*FieldReferenceNode + // Dots represent the separating '.' characters between name parts. The + // length of this slice must be exactly len(Parts)-1, each item in Parts + // having a corresponding item in this slice *except the last* (since a + // trailing dot is not allowed). + // + // These do *not* include dots that are inside of an extension name. For + // example: (foo.bar).baz.(bob) has three parts: + // 1. (foo.bar) - an extension name + // 2. baz - a regular field in foo.bar + // 3. (bob) - an extension field in baz + // Note that the dot in foo.bar will thus not be present in Dots but is + // instead in Parts[0]. + Dots []*RuneNode +} + +// NewOptionNameNode creates a new *OptionNameNode. The dots arg must have a +// length that is one less than the length of parts. The parts arg must not be +// empty. +func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNameNode { + if len(parts) == 0 { + panic("must have at least one part") + } + if len(dots) != len(parts)-1 { + panic(fmt.Sprintf("%d parts requires %d dots, not %d", len(parts), len(parts)-1, len(dots))) + } + children := make([]Node, 0, len(parts)*2-1) + for i, part := range parts { + if part == nil { + panic(fmt.Sprintf("parts[%d] is nil", i)) + } + if i > 0 { + if dots[i-1] == nil { + panic(fmt.Sprintf("dots[%d] is nil", i-1)) + } + children = append(children, dots[i-1]) + } + children = append(children, part) + } + return &OptionNameNode{ + compositeNode: compositeNode{ + children: children, + }, + Parts: parts, + Dots: dots, + } +} + +// FieldReferenceNode is a reference to a field name. It can indicate a regular +// field (simple unqualified name), an extension field (possibly-qualified name +// that is enclosed either in brackets or parentheses), or an "any" type +// reference (a type URL in the form "server.host/fully.qualified.Name" that is +// enclosed in brackets). +// +// Extension names are used in options to refer to custom options (which are +// actually extensions), in which case the name is enclosed in parentheses "(" +// and ")". They can also be used to refer to extension fields of options. +// +// Extension names are also used in message literals to set extension fields, +// in which case the name is enclosed in square brackets "[" and "]". +// +// "Any" type references can only be used in message literals, and are not +// allowed in option names. They are always enclosed in square brackets. 
An +// "any" type reference is distinguished from an extension name by the presence +// of a slash, which must be present in an "any" type reference and must be +// absent in an extension name. +// +// Examples: +// +// foobar +// (foo.bar) +// [foo.bar] +// [type.googleapis.com/foo.bar] +type FieldReferenceNode struct { + compositeNode + Open *RuneNode // only present for extension names and "any" type references + + // only present for "any" type references + UrlPrefix IdentValueNode + Slash *RuneNode + + Name IdentValueNode + + Close *RuneNode // only present for extension names and "any" type references +} + +// NewFieldReferenceNode creates a new *FieldReferenceNode for a regular field. +// The name arg must not be nil. +func NewFieldReferenceNode(name *IdentNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + children := []Node{name} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + } +} + +// NewExtensionFieldReferenceNode creates a new *FieldReferenceNode for an +// extension field. All args must be non-nil. The openSym and closeSym runes +// should be "(" and ")" or "[" and "]". +func NewExtensionFieldReferenceNode(openSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + children := []Node{openSym, name, closeSym} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + Name: name, + Close: closeSym, + } +} + +// NewAnyTypeReferenceNode creates a new *FieldReferenceNode for an "any" +// type reference. All args must be non-nil. The openSym and closeSym runes +// should be "[" and "]". The slashSym run should be "/". +func NewAnyTypeReferenceNode(openSym *RuneNode, urlPrefix IdentValueNode, slashSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + if urlPrefix == nil { + panic("urlPrefix is nil") + } + if slashSym == nil { + panic("slashSym is nil") + } + children := []Node{openSym, urlPrefix, slashSym, name, closeSym} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + UrlPrefix: urlPrefix, + Slash: slashSym, + Name: name, + Close: closeSym, + } +} + +// IsExtension reports if this is an extension name or not (e.g. enclosed in +// punctuation, such as parentheses or brackets). +func (a *FieldReferenceNode) IsExtension() bool { + return a.Open != nil && a.Slash == nil +} + +// IsExtension reports if this is an extension name or not (e.g. enclosed in +// punctuation, such as parentheses or brackets). +func (a *FieldReferenceNode) IsAnyTypeReference() bool { + return a.Slash != nil +} + +func (a *FieldReferenceNode) Value() string { + if a.Open != nil { + if a.Slash != nil { + return string(a.Open.Rune) + string(a.UrlPrefix.AsIdentifier()) + string(a.Slash.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune) + } + return string(a.Open.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune) + } else { + return string(a.Name.AsIdentifier()) + } +} + +// CompactOptionsNode represents a compact options declaration, as used with +// fields, enum values, and extension ranges. 
Example:
+//
+//	[deprecated = true, json_name = "foo_bar"]
+type CompactOptionsNode struct {
+	compositeNode
+	OpenBracket *RuneNode
+	Options     []*OptionNode
+	// Commas represent the separating ',' characters between options. The
+	// length of this slice must be exactly len(Options)-1, with each item
+	// in Options having a corresponding item in this slice *except the last*
+	// (since a trailing comma is not allowed).
+	Commas       []*RuneNode
+	CloseBracket *RuneNode
+}
+
+// NewCompactOptionsNode creates a *CompactOptionsNode. All args must be
+// non-nil. The commas arg must have a length that is one less than the
+// length of opts. The opts arg must not be empty.
+func NewCompactOptionsNode(openBracket *RuneNode, opts []*OptionNode, commas []*RuneNode, closeBracket *RuneNode) *CompactOptionsNode {
+	if openBracket == nil {
+		panic("openBracket is nil")
+	}
+	if closeBracket == nil {
+		panic("closeBracket is nil")
+	}
+	if len(opts) == 0 {
+		panic("must have at least one option")
+	}
+	if len(commas) != len(opts)-1 {
+		panic(fmt.Sprintf("%d opts requires %d commas, not %d", len(opts), len(opts)-1, len(commas)))
+	}
+	children := make([]Node, 0, len(opts)*2+1)
+	children = append(children, openBracket)
+	for i, opt := range opts {
+		if i > 0 {
+			if commas[i-1] == nil {
+				panic(fmt.Sprintf("commas[%d] is nil", i-1))
+			}
+			children = append(children, commas[i-1])
+		}
+		if opt == nil {
+			panic(fmt.Sprintf("opts[%d] is nil", i))
+		}
+		children = append(children, opt)
+	}
+	children = append(children, closeBracket)
+
+	return &CompactOptionsNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		OpenBracket:  openBracket,
+		Options:      opts,
+		Commas:       commas,
+		CloseBracket: closeBracket,
+	}
+}
+
+func (e *CompactOptionsNode) GetElements() []*OptionNode {
+	if e == nil {
+		return nil
+	}
+	return e.Options
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go
new file mode 100644
index 00000000..271200c7
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go
@@ -0,0 +1,86 @@
+package ast
+
+import "io"
+
+// Print prints the given AST node to the given output. This operation
+// basically walks the AST and, for each TerminalNode, prints the node's
+// leading comments, leading whitespace, the node's raw text, and then
+// any trailing comments. If the given node is a *FileNode, it will then
+// also print the file's FinalComments and FinalWhitespace.
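+//
+// A minimal round-trip sketch (fileNode being a hypothetical parsed *FileNode):
+//
+//	var buf bytes.Buffer
+//	if err := Print(&buf, fileNode); err != nil {
+//		// handle the write error
+//	}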
+func Print(w io.Writer, node Node) error {
+	sw, ok := w.(stringWriter)
+	if !ok {
+		sw = &strWriter{w}
+	}
+	var err error
+	Walk(node, func(n Node) (bool, VisitFunc) {
+		if err != nil {
+			return false, nil
+		}
+		token, ok := n.(TerminalNode)
+		if !ok {
+			return true, nil
+		}
+
+		err = printComments(sw, token.LeadingComments())
+		if err != nil {
+			return false, nil
+		}
+
+		_, err = sw.WriteString(token.LeadingWhitespace())
+		if err != nil {
+			return false, nil
+		}
+
+		_, err = sw.WriteString(token.RawText())
+		if err != nil {
+			return false, nil
+		}
+
+		err = printComments(sw, token.TrailingComments())
+		return false, nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if file, ok := node.(*FileNode); ok {
+		err = printComments(sw, file.FinalComments)
+		if err != nil {
+			return err
+		}
+		_, err = sw.WriteString(file.FinalWhitespace)
+		return err
+	}
+
+	return nil
+}
+
+func printComments(sw stringWriter, comments []Comment) error {
+	for _, comment := range comments {
+		if _, err := sw.WriteString(comment.LeadingWhitespace); err != nil {
+			return err
+		}
+		if _, err := sw.WriteString(comment.Text); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// many io.Writer impls also provide a string-based method
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// adapter, in case the given writer does NOT provide a string-based method
+type strWriter struct {
+	io.Writer
+}
+
+func (s *strWriter) WriteString(str string) (int, error) {
+	if str == "" {
+		return 0, nil
+	}
+	return s.Write([]byte(str))
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go
new file mode 100644
index 00000000..cdd78baf
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go
@@ -0,0 +1,305 @@
+package ast
+
+import "fmt"
+
+// ExtensionRangeNode represents an extension range declaration in an extendable
+// message. Example:
+//
+//	extensions 100 to max;
+type ExtensionRangeNode struct {
+	compositeNode
+	Keyword *KeywordNode
+	Ranges  []*RangeNode
+	// Commas represent the separating ',' characters between ranges. The
+	// length of this slice must be exactly len(Ranges)-1, each item in Ranges
+	// having a corresponding item in this slice *except the last* (since a
+	// trailing comma is not allowed).
+	Commas    []*RuneNode
+	Options   *CompactOptionsNode
+	Semicolon *RuneNode
+}
+
+func (e *ExtensionRangeNode) msgElement() {}
+
+// NewExtensionRangeNode creates a new *ExtensionRangeNode. All args must be
+// non-nil except opts, which may be nil.
+// - keyword: The token corresponding to the "extensions" keyword.
+// - ranges: One or more range expressions.
+// - commas: Tokens that represent the "," runes that delimit the range expressions.
+// The length of commas must be one less than the length of ranges.
+// - opts: The node corresponding to options that apply to each of the ranges.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
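+//
+// An illustrative sketch (hypothetical tokens) for `extensions 100 to max;`,
+// which has a single range and therefore no commas:
+//
+//	ext := NewExtensionRangeNode(extensionsKw, []*RangeNode{rng}, nil, nil, semiRune)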
+func NewExtensionRangeNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, opts *CompactOptionsNode, semicolon *RuneNode) *ExtensionRangeNode {
+	if keyword == nil {
+		panic("keyword is nil")
+	}
+	if semicolon == nil {
+		panic("semicolon is nil")
+	}
+	if len(ranges) == 0 {
+		panic("must have at least one range")
+	}
+	if len(commas) != len(ranges)-1 {
+		panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas)))
+	}
+	numChildren := len(ranges)*2 + 1
+	if opts != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, keyword)
+	for i, rng := range ranges {
+		if i > 0 {
+			if commas[i-1] == nil {
+				panic(fmt.Sprintf("commas[%d] is nil", i-1))
+			}
+			children = append(children, commas[i-1])
+		}
+		if rng == nil {
+			panic(fmt.Sprintf("ranges[%d] is nil", i))
+		}
+		children = append(children, rng)
+	}
+	if opts != nil {
+		children = append(children, opts)
+	}
+	children = append(children, semicolon)
+	return &ExtensionRangeNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Keyword:   keyword,
+		Ranges:    ranges,
+		Commas:    commas,
+		Options:   opts,
+		Semicolon: semicolon,
+	}
+}
+
+// RangeDeclNode is a placeholder interface for AST nodes that represent
+// numeric ranges. This allows NoSourceNode to be used in place of *RangeNode
+// for some usages.
+type RangeDeclNode interface {
+	Node
+	RangeStart() Node
+	RangeEnd() Node
+}
+
+var _ RangeDeclNode = (*RangeNode)(nil)
+var _ RangeDeclNode = NoSourceNode{}
+
+// RangeNode represents a range expression, used in both extension ranges and
+// reserved ranges. Example:
+//
+//	1000 to max
+type RangeNode struct {
+	compositeNode
+	StartVal IntValueNode
+	// if To is non-nil, then exactly one of EndVal or Max must also be non-nil
+	To *KeywordNode
+	// EndVal and Max are mutually exclusive
+	EndVal IntValueNode
+	Max    *KeywordNode
+}
+
+// NewRangeNode creates a new *RangeNode. The start argument must be non-nil.
+// The to argument represents the "to" keyword. If present (i.e. if it is non-nil),
+// then so must be exactly one of end or max. If max is non-nil, it indicates a
+// "100 to max" style range. But if end is non-nil, the end of the range is a
+// literal, such as "100 to 200".
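+//
+// Illustrative sketches (hypothetical tokens) of the three legal shapes:
+//
+//	r1 := NewRangeNode(hundred, nil, nil, nil)         // "100"
+//	r2 := NewRangeNode(hundred, toKw, twoHundred, nil) // "100 to 200"
+//	r3 := NewRangeNode(hundred, toKw, nil, maxKw)      // "100 to max"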
+func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, max *KeywordNode) *RangeNode {
+	if start == nil {
+		panic("start is nil")
+	}
+	numChildren := 1
+	if to != nil {
+		if end == nil && max == nil {
+			panic("to is not nil, but end and max both are")
+		}
+		if end != nil && max != nil {
+			panic("end and max cannot be both non-nil")
+		}
+		numChildren = 3
+	} else {
+		if end != nil {
+			panic("to is nil, but end is not")
+		}
+		if max != nil {
+			panic("to is nil, but max is not")
+		}
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, start)
+	if to != nil {
+		children = append(children, to)
+		if end != nil {
+			children = append(children, end)
+		} else {
+			children = append(children, max)
+		}
+	}
+	return &RangeNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		StartVal: start,
+		To:       to,
+		EndVal:   end,
+		Max:      max,
+	}
+}
+
+func (n *RangeNode) RangeStart() Node {
+	return n.StartVal
+}
+
+func (n *RangeNode) RangeEnd() Node {
+	if n.Max != nil {
+		return n.Max
+	}
+	if n.EndVal != nil {
+		return n.EndVal
+	}
+	return n.StartVal
+}
+
+func (n *RangeNode) StartValue() interface{} {
+	return n.StartVal.Value()
+}
+
+func (n *RangeNode) StartValueAsInt32(min, max int32) (int32, bool) {
+	return AsInt32(n.StartVal, min, max)
+}
+
+func (n *RangeNode) EndValue() interface{} {
+	if n.EndVal == nil {
+		return nil
+	}
+	return n.EndVal.Value()
+}
+
+func (n *RangeNode) EndValueAsInt32(min, max int32) (int32, bool) {
+	if n.Max != nil {
+		return max, true
+	}
+	if n.EndVal == nil {
+		return n.StartValueAsInt32(min, max)
+	}
+	return AsInt32(n.EndVal, min, max)
+}
+
+// ReservedNode represents a reserved declaration, which can be used to reserve
+// either names or numbers. Examples:
+//
+//	reserved 1, 10-12, 15;
+//	reserved "foo", "bar", "baz";
+type ReservedNode struct {
+	compositeNode
+	Keyword *KeywordNode
+	// If non-empty, this node represents reserved ranges and Names will be empty.
+	Ranges []*RangeNode
+	// If non-empty, this node represents reserved names and Ranges will be empty.
+	Names []StringValueNode
+	// Commas represent the separating ',' characters between ranges or names. The
+	// length of this slice must be exactly len(Ranges)-1 or len(Names)-1, depending
+	// on whether this node represents reserved ranges or reserved names. Each item
+	// in Ranges or Names has a corresponding item in this slice *except the last*
+	// (since a trailing comma is not allowed).
+	Commas    []*RuneNode
+	Semicolon *RuneNode
+}
+
+func (*ReservedNode) msgElement()  {}
+func (*ReservedNode) enumElement() {}
+
+// NewReservedRangesNode creates a new *ReservedNode that represents reserved
+// numeric ranges. All args must be non-nil.
+// - keyword: The token corresponding to the "reserved" keyword.
+// - ranges: One or more range expressions.
+// - commas: Tokens that represent the "," runes that delimit the range expressions.
+// The length of commas must be one less than the length of ranges.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
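+//
+// For illustration only, "reserved 2, 9 to 11;" might be assembled as follows,
+// where kw, r1, r2, comma, and semi are hypothetical tokens from the lexer:
+//
+//	res := NewReservedRangesNode(kw, []*RangeNode{r1, r2}, []*RuneNode{comma}, semi)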
+func NewReservedRangesNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(ranges) == 0 { + panic("must have at least one range") + } + if len(commas) != len(ranges)-1 { + panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas))) + } + children := make([]Node, 0, len(ranges)*2+1) + children = append(children, keyword) + for i, rng := range ranges { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if rng == nil { + panic(fmt.Sprintf("ranges[%d] is nil", i)) + } + children = append(children, rng) + } + children = append(children, semicolon) + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Ranges: ranges, + Commas: commas, + Semicolon: semicolon, + } +} + +// NewReservedNamesNode creates a new *ReservedNode that represents reserved +// names. All args must be non-nil. +// - keyword: The token corresponding to the "reserved" keyword. +// - names: One or more names. +// - commas: Tokens that represent the "," runes that delimit the names. +// The length of commas must be one less than the length of names. +// - semicolon The token corresponding to the ";" rune that ends the declaration. +func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(names) == 0 { + panic("must have at least one name") + } + if len(commas) != len(names)-1 { + panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas))) + } + children := make([]Node, 0, len(names)*2+1) + children = append(children, keyword) + for i, name := range names { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if name == nil { + panic(fmt.Sprintf("names[%d] is nil", i)) + } + children = append(children, name) + } + children = append(children, semicolon) + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Names: names, + Commas: commas, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go new file mode 100644 index 00000000..739b29cc --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go @@ -0,0 +1,273 @@ +package ast + +import "fmt" + +// ServiceNode represents a service declaration. Example: +// +// service Foo { +// rpc Bar (Baz) returns (Bob); +// rpc Frobnitz (stream Parts) returns (Gyzmeaux); +// } +type ServiceNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []ServiceElement + CloseBrace *RuneNode +} + +func (*ServiceNode) fileElement() {} + +// NewServiceNode creates a new *ServiceNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "service" keyword. +// - name: The token corresponding to the service's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the service body. 
+// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewServiceNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []ServiceElement, closeBrace *RuneNode) *ServiceNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *RPCNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ServiceElement type: %T", decl)) + } + } + + return &ServiceNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +// ServiceElement is an interface implemented by all AST nodes that can +// appear in the body of a service declaration. +type ServiceElement interface { + Node + serviceElement() +} + +var _ ServiceElement = (*OptionNode)(nil) +var _ ServiceElement = (*RPCNode)(nil) +var _ ServiceElement = (*EmptyDeclNode)(nil) + +// RPCDeclNode is a placeholder interface for AST nodes that represent RPC +// declarations. This allows NoSourceNode to be used in place of *RPCNode +// for some usages. +type RPCDeclNode interface { + Node + GetInputType() Node + GetOutputType() Node +} + +var _ RPCDeclNode = (*RPCNode)(nil) +var _ RPCDeclNode = NoSourceNode{} + +// RPCNode represents an RPC declaration. Example: +// +// rpc Foo (Bar) returns (Baz); +type RPCNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + Input *RPCTypeNode + Returns *KeywordNode + Output *RPCTypeNode + Semicolon *RuneNode + OpenBrace *RuneNode + Decls []RPCElement + CloseBrace *RuneNode +} + +func (n *RPCNode) serviceElement() {} + +// NewRPCNode creates a new *RPCNode with no body. All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. +// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewRPCNode(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, semicolon *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, input, returns, output, semicolon} + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + Semicolon: semicolon, + } +} + +// NewRPCNodeWithBody creates a new *RPCNode that includes a body (and possibly +// options). All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. 
+// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the RPC body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewRPCNodeWithBody(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, openBrace *RuneNode, decls []RPCElement, closeBrace *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 7+len(decls)) + children = append(children, keyword, name, input, returns, output, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid RPCElement type: %T", decl)) + } + } + + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +func (n *RPCNode) GetInputType() Node { + return n.Input.MessageType +} + +func (n *RPCNode) GetOutputType() Node { + return n.Output.MessageType +} + +// RPCElement is an interface implemented by all AST nodes that can +// appear in the body of an rpc declaration (aka method). +type RPCElement interface { + Node + methodElement() +} + +var _ RPCElement = (*OptionNode)(nil) +var _ RPCElement = (*EmptyDeclNode)(nil) + +// RPCTypeNode represents the declaration of a request or response type for an +// RPC. Example: +// +// (stream foo.Bar) +type RPCTypeNode struct { + compositeNode + OpenParen *RuneNode + Stream *KeywordNode + MessageType IdentValueNode + CloseParen *RuneNode +} + +// NewRPCTypeNode creates a new *RPCTypeNode. All arguments must be non-nil +// except stream, which may be nil. +// - openParen: The token corresponding to the "(" rune that starts the declaration. +// - stream: The token corresponding to the "stream" keyword or nil if not present. +// - msgType: The token corresponding to the message type's name. +// - closeParen: The token corresponding to the ")" rune that ends the declaration. 
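+//
+// For illustration only, "(stream foo.Bar)" might be assembled as follows,
+// where openParen, streamKw, msgType, and closeParen are hypothetical tokens
+// from the lexer (streamKw would be nil for a unary direction):
+//
+//	typ := NewRPCTypeNode(openParen, streamKw, msgType, closeParen)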
+func NewRPCTypeNode(openParen *RuneNode, stream *KeywordNode, msgType IdentValueNode, closeParen *RuneNode) *RPCTypeNode { + if openParen == nil { + panic("openParen is nil") + } + if msgType == nil { + panic("msgType is nil") + } + if closeParen == nil { + panic("closeParen is nil") + } + var children []Node + if stream != nil { + children = []Node{openParen, stream, msgType, closeParen} + } else { + children = []Node{openParen, msgType, closeParen} + } + + return &RPCTypeNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenParen: openParen, + Stream: stream, + MessageType: msgType, + CloseParen: closeParen, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go new file mode 100644 index 00000000..8ab09c6f --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go @@ -0,0 +1,29 @@ +package ast + +import ( + "github.com/bufbuild/protocompile/ast" +) + +// SourcePos identifies a location in a proto source file. +type SourcePos = ast.SourcePos + +// PosRange is a range of positions in a source file that indicates +// the span of some region of source, such as a single token or +// a sub-tree of the AST. +type PosRange struct { + Start, End SourcePos +} + +// Comment represents a single comment in a source file. It indicates +// the position of the comment and its contents. +type Comment struct { + // The location of the comment in the source file. + PosRange + // Any whitespace between the prior lexical element (either a token + // or other comment) and this comment. + LeadingWhitespace string + // The text of the comment, including any "//" or "/*" and "*/" + // symbols at the start and end. Single-line comments will include + // the trailing newline rune in Text. + Text string +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go new file mode 100644 index 00000000..c75f4481 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go @@ -0,0 +1,569 @@ +package ast + +import ( + "fmt" + "math" + "strings" +) + +// ValueNode is an AST node that represents a literal value. +// +// It also includes references (e.g. IdentifierValueNode), which can be +// used as values in some contexts, such as describing the default value +// for a field, which can refer to an enum value. +// +// This also allows NoSourceNode to be used in place of a real value node +// for some usages. +type ValueNode interface { + Node + // Value returns a Go representation of the value. For scalars, this + // will be a string, int64, uint64, float64, or bool. This could also + // be an Identifier (e.g. IdentValueNodes). 
It can also be a composite
+	// literal:
+	// * For array literals, the type returned will be []ValueNode
+	// * For message literals, the type returned will be []*MessageFieldNode
+	Value() interface{}
+}
+
+var _ ValueNode = (*IdentNode)(nil)
+var _ ValueNode = (*CompoundIdentNode)(nil)
+var _ ValueNode = (*StringLiteralNode)(nil)
+var _ ValueNode = (*CompoundStringLiteralNode)(nil)
+var _ ValueNode = (*UintLiteralNode)(nil)
+var _ ValueNode = (*PositiveUintLiteralNode)(nil)
+var _ ValueNode = (*NegativeIntLiteralNode)(nil)
+var _ ValueNode = (*FloatLiteralNode)(nil)
+var _ ValueNode = (*SpecialFloatLiteralNode)(nil)
+var _ ValueNode = (*SignedFloatLiteralNode)(nil)
+var _ ValueNode = (*BoolLiteralNode)(nil)
+var _ ValueNode = (*ArrayLiteralNode)(nil)
+var _ ValueNode = (*MessageLiteralNode)(nil)
+var _ ValueNode = NoSourceNode{}
+
+// StringValueNode is an AST node that represents a string literal.
+// Such a node can be a single literal (*StringLiteralNode) or a
+// concatenation of multiple literals (*CompoundStringLiteralNode).
+type StringValueNode interface {
+	ValueNode
+	AsString() string
+}
+
+var _ StringValueNode = (*StringLiteralNode)(nil)
+var _ StringValueNode = (*CompoundStringLiteralNode)(nil)
+
+// StringLiteralNode represents a simple string literal. Example:
+//
+//	"proto2"
+type StringLiteralNode struct {
+	terminalNode
+	// Val is the actual string value that the literal indicates.
+	Val string
+}
+
+// NewStringLiteralNode creates a new *StringLiteralNode with the given val.
+func NewStringLiteralNode(val string, info TokenInfo) *StringLiteralNode {
+	return &StringLiteralNode{
+		terminalNode: info.asTerminalNode(),
+		Val:          val,
+	}
+}
+
+func (n *StringLiteralNode) Value() interface{} {
+	return n.AsString()
+}
+
+func (n *StringLiteralNode) AsString() string {
+	return n.Val
+}
+
+// CompoundStringLiteralNode represents a compound string literal, which is
+// the concatenation of adjacent string literals. Example:
+//
+//	"this " "is" " all one " "string"
+type CompoundStringLiteralNode struct {
+	compositeNode
+	Val string
+}
+
+// NewCompoundLiteralStringNode creates a new *CompoundStringLiteralNode that
+// consists of the given string components. The components argument may not be
+// empty.
+func NewCompoundLiteralStringNode(components ...*StringLiteralNode) *CompoundStringLiteralNode {
+	if len(components) == 0 {
+		panic("must have at least one component")
+	}
+	children := make([]Node, len(components))
+	var b strings.Builder
+	for i, comp := range components {
+		children[i] = comp
+		b.WriteString(comp.Val)
+	}
+	return &CompoundStringLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Val: b.String(),
+	}
+}
+
+func (n *CompoundStringLiteralNode) Value() interface{} {
+	return n.AsString()
+}
+
+func (n *CompoundStringLiteralNode) AsString() string {
+	return n.Val
+}
+
+// IntValueNode is an AST node that represents an integer literal. If
+// an integer literal is too large for an int64 (or uint64 for
+// positive literals), it is represented instead by a FloatValueNode.
+type IntValueNode interface {
+	ValueNode
+	AsInt64() (int64, bool)
+	AsUint64() (uint64, bool)
+}
+
+// AsInt32 range checks the given int value and returns its value as an int32
+// if it is within the range, or (0, false) if it is outside the range.
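+//
+// For example, a field number parsed from source might be range checked like
+// this (1 and 536870911 are protobuf's minimum and maximum field numbers,
+// shown here purely for illustration):
+//
+//	if tag, ok := AsInt32(node, 1, 536870911); ok {
+//		// tag is safe to use as an int32 field number
+//	}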
+func AsInt32(n IntValueNode, min, max int32) (int32, bool) { + i, ok := n.AsInt64() + if !ok { + return 0, false + } + if i < int64(min) || i > int64(max) { + return 0, false + } + return int32(i), true +} + +var _ IntValueNode = (*UintLiteralNode)(nil) +var _ IntValueNode = (*PositiveUintLiteralNode)(nil) +var _ IntValueNode = (*NegativeIntLiteralNode)(nil) + +// UintLiteralNode represents a simple integer literal with no sign character. +type UintLiteralNode struct { + terminalNode + // Val is the numeric value indicated by the literal + Val uint64 +} + +// NewUintLiteralNode creates a new *UintLiteralNode with the given val. +func NewUintLiteralNode(val uint64, info TokenInfo) *UintLiteralNode { + return &UintLiteralNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} + +func (n *UintLiteralNode) Value() interface{} { + return n.Val +} + +func (n *UintLiteralNode) AsInt64() (int64, bool) { + if n.Val > math.MaxInt64 { + return 0, false + } + return int64(n.Val), true +} + +func (n *UintLiteralNode) AsUint64() (uint64, bool) { + return n.Val, true +} + +func (n *UintLiteralNode) AsFloat() float64 { + return float64(n.Val) +} + +// PositiveUintLiteralNode represents an integer literal with a positive (+) sign. +type PositiveUintLiteralNode struct { + compositeNode + Plus *RuneNode + Uint *UintLiteralNode + Val uint64 +} + +// NewPositiveUintLiteralNode creates a new *PositiveUintLiteralNode. Both +// arguments must be non-nil. +func NewPositiveUintLiteralNode(sign *RuneNode, i *UintLiteralNode) *PositiveUintLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &PositiveUintLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Plus: sign, + Uint: i, + Val: i.Val, + } +} + +func (n *PositiveUintLiteralNode) Value() interface{} { + return n.Val +} + +func (n *PositiveUintLiteralNode) AsInt64() (int64, bool) { + if n.Val > math.MaxInt64 { + return 0, false + } + return int64(n.Val), true +} + +func (n *PositiveUintLiteralNode) AsUint64() (uint64, bool) { + return n.Val, true +} + +// NegativeIntLiteralNode represents an integer literal with a negative (-) sign. +type NegativeIntLiteralNode struct { + compositeNode + Minus *RuneNode + Uint *UintLiteralNode + Val int64 +} + +// NewNegativeIntLiteralNode creates a new *NegativeIntLiteralNode. Both +// arguments must be non-nil. +func NewNegativeIntLiteralNode(sign *RuneNode, i *UintLiteralNode) *NegativeIntLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &NegativeIntLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Minus: sign, + Uint: i, + Val: -int64(i.Val), + } +} + +func (n *NegativeIntLiteralNode) Value() interface{} { + return n.Val +} + +func (n *NegativeIntLiteralNode) AsInt64() (int64, bool) { + return n.Val, true +} + +func (n *NegativeIntLiteralNode) AsUint64() (uint64, bool) { + if n.Val < 0 { + return 0, false + } + return uint64(n.Val), true +} + +// FloatValueNode is an AST node that represents a numeric literal with +// a floating point, in scientific notation, or too large to fit in an +// int64 or uint64. +type FloatValueNode interface { + ValueNode + AsFloat() float64 +} + +var _ FloatValueNode = (*FloatLiteralNode)(nil) +var _ FloatValueNode = (*SpecialFloatLiteralNode)(nil) +var _ FloatValueNode = (*UintLiteralNode)(nil) + +// FloatLiteralNode represents a floating point numeric literal. 
+type FloatLiteralNode struct { + terminalNode + // Val is the numeric value indicated by the literal + Val float64 +} + +// NewFloatLiteralNode creates a new *FloatLiteralNode with the given val. +func NewFloatLiteralNode(val float64, info TokenInfo) *FloatLiteralNode { + return &FloatLiteralNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} + +func (n *FloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *FloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SpecialFloatLiteralNode represents a special floating point numeric literal +// for "inf" and "nan" values. +type SpecialFloatLiteralNode struct { + *KeywordNode + Val float64 +} + +// NewSpecialFloatLiteralNode returns a new *SpecialFloatLiteralNode for the +// given keyword, which must be "inf" or "nan". +func NewSpecialFloatLiteralNode(name *KeywordNode) *SpecialFloatLiteralNode { + var f float64 + if name.Val == "inf" { + f = math.Inf(1) + } else { + f = math.NaN() + } + return &SpecialFloatLiteralNode{ + KeywordNode: name, + Val: f, + } +} + +func (n *SpecialFloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *SpecialFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SignedFloatLiteralNode represents a signed floating point number. +type SignedFloatLiteralNode struct { + compositeNode + Sign *RuneNode + Float FloatValueNode + Val float64 +} + +// NewSignedFloatLiteralNode creates a new *SignedFloatLiteralNode. Both +// arguments must be non-nil. +func NewSignedFloatLiteralNode(sign *RuneNode, f FloatValueNode) *SignedFloatLiteralNode { + if sign == nil { + panic("sign is nil") + } + if f == nil { + panic("f is nil") + } + children := []Node{sign, f} + val := f.AsFloat() + if sign.Rune == '-' { + val = -val + } + return &SignedFloatLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Sign: sign, + Float: f, + Val: val, + } +} + +func (n *SignedFloatLiteralNode) Value() interface{} { + return n.Val +} + +func (n *SignedFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// BoolLiteralNode represents a boolean literal. +// +// Deprecated: The AST uses IdentNode for boolean literals, where the +// identifier value is "true" or "false". This is required because an +// identifier "true" is not necessarily a boolean value as it could also +// be an enum value named "true" (ditto for "false"). +type BoolLiteralNode struct { + *KeywordNode + Val bool +} + +// NewBoolLiteralNode returns a new *BoolLiteralNode for the given keyword, +// which must be "true" or "false". +func NewBoolLiteralNode(name *KeywordNode) *BoolLiteralNode { + return &BoolLiteralNode{ + KeywordNode: name, + Val: name.Val == "true", + } +} + +func (n *BoolLiteralNode) Value() interface{} { + return n.Val +} + +// ArrayLiteralNode represents an array literal, which is only allowed inside of +// a MessageLiteralNode, to indicate values for a repeated field. Example: +// +// ["foo", "bar", "baz"] +type ArrayLiteralNode struct { + compositeNode + OpenBracket *RuneNode + Elements []ValueNode + // Commas represent the separating ',' characters between elements. The + // length of this slice must be exactly len(Elements)-1, with each item + // in Elements having a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + CloseBracket *RuneNode +} + +// NewArrayLiteralNode creates a new *ArrayLiteralNode. 
The openBracket and
+// closeBracket args must be non-nil and represent the "[" and "]" runes that
+// surround the array values. The given commas arg must have a length that is
+// one less than the length of the vals arg. However, vals may be empty, in
+// which case commas must also be empty.
+func NewArrayLiteralNode(openBracket *RuneNode, vals []ValueNode, commas []*RuneNode, closeBracket *RuneNode) *ArrayLiteralNode {
+	if openBracket == nil {
+		panic("openBracket is nil")
+	}
+	if closeBracket == nil {
+		panic("closeBracket is nil")
+	}
+	if len(vals) == 0 && len(commas) != 0 {
+		panic("vals is empty but commas is not")
+	}
+	if len(vals) > 0 && len(commas) != len(vals)-1 {
+		panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals)-1, len(commas)))
+	}
+	children := make([]Node, 0, len(vals)*2+1)
+	children = append(children, openBracket)
+	for i, val := range vals {
+		if i > 0 {
+			if commas[i-1] == nil {
+				panic(fmt.Sprintf("commas[%d] is nil", i-1))
+			}
+			children = append(children, commas[i-1])
+		}
+		if val == nil {
+			panic(fmt.Sprintf("vals[%d] is nil", i))
+		}
+		children = append(children, val)
+	}
+	children = append(children, closeBracket)
+
+	return &ArrayLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		OpenBracket:  openBracket,
+		Elements:     vals,
+		Commas:       commas,
+		CloseBracket: closeBracket,
+	}
+}
+
+func (n *ArrayLiteralNode) Value() interface{} {
+	return n.Elements
+}
+
+// MessageLiteralNode represents a message literal, which is compatible with the
+// protobuf text format and can be used for custom options with message types.
+// Example:
+//
+//	{ foo:1 foo:2 foo:3 bar:<name:"abc" id:123> }
+type MessageLiteralNode struct {
+	compositeNode
+	Open     *RuneNode // should be '{' or '<'
+	Elements []*MessageFieldNode
+	// Separator characters between elements, which can be either ','
+	// or ';' if present. This slice must be exactly len(Elements) in
+	// length, with each item in Elements having one corresponding item
+	// in Seps. Separators in message literals are optional, so a given
+	// item in this slice may be nil to indicate absence of a separator.
+	Seps  []*RuneNode
+	Close *RuneNode // should be '}' or '>', depending on Open
+}
+
+// NewMessageLiteralNode creates a new *MessageLiteralNode. The openSym and
+// closeSym runes must not be nil and should be "{" and "}" or "<" and ">".
+//
+// Unlike separators (dots and commas) used for other AST nodes that represent
+// a list of elements, the seps arg must be the SAME length as vals, and it may
+// contain nil values to indicate absence of a separator (in fact, it could be
+// all nils).
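+//
+// For illustration only, a literal with two fields and no separators might be
+// built as follows, where open, f1, f2, and closeSym are hypothetical nodes;
+// note that seps has the same length as vals, with nil entries marking the
+// absent separators:
+//
+//	lit := NewMessageLiteralNode(open, []*MessageFieldNode{f1, f2}, []*RuneNode{nil, nil}, closeSym)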
+func NewMessageLiteralNode(openSym *RuneNode, vals []*MessageFieldNode, seps []*RuneNode, closeSym *RuneNode) *MessageLiteralNode {
+	if openSym == nil {
+		panic("openSym is nil")
+	}
+	if closeSym == nil {
+		panic("closeSym is nil")
+	}
+	if len(seps) != len(vals) {
+		panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals), len(seps)))
+	}
+	numChildren := len(vals) + 2
+	for _, sep := range seps {
+		if sep != nil {
+			numChildren++
+		}
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, openSym)
+	for i, val := range vals {
+		if val == nil {
+			panic(fmt.Sprintf("vals[%d] is nil", i))
+		}
+		children = append(children, val)
+		if seps[i] != nil {
+			children = append(children, seps[i])
+		}
+	}
+	children = append(children, closeSym)
+
+	return &MessageLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Open:     openSym,
+		Elements: vals,
+		Seps:     seps,
+		Close:    closeSym,
+	}
+}
+
+func (n *MessageLiteralNode) Value() interface{} {
+	return n.Elements
+}
+
+// MessageFieldNode represents a single field (name and value) inside of a
+// message literal. Example:
+//
+//	foo:"bar"
+type MessageFieldNode struct {
+	compositeNode
+	Name *FieldReferenceNode
+	// Sep represents the ':' separator between the name and value. If
+	// the value is a message literal (and thus starts with '<' or '{')
+	// or an array literal (starting with '[') then the separator is
+	// optional, and thus may be nil.
+	Sep *RuneNode
+	Val ValueNode
+}
+
+// NewMessageFieldNode creates a new *MessageFieldNode. All args except sep
+// must be non-nil.
+func NewMessageFieldNode(name *FieldReferenceNode, sep *RuneNode, val ValueNode) *MessageFieldNode {
+	if name == nil {
+		panic("name is nil")
+	}
+	if val == nil {
+		panic("val is nil")
+	}
+	numChildren := 2
+	if sep != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, name)
+	if sep != nil {
+		children = append(children, sep)
+	}
+	children = append(children, val)
+
+	return &MessageFieldNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Name: name,
+		Sep:  sep,
+		Val:  val,
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go
new file mode 100644
index 00000000..53301946
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go
@@ -0,0 +1,492 @@
+package ast
+
+// VisitFunc is used to examine a node in the AST when walking the tree.
+// It returns true or false as to whether or not the descendants of the
+// given node should be visited. If it returns true, the node's children
+// will be visited; if false, they will not. When returning true, it
+// can also return a new VisitFunc to use for the children. If it returns
+// (true, nil), then the current function will be re-used when visiting
+// the children.
+//
+// See also the Visitor type.
+type VisitFunc func(Node) (bool, VisitFunc)
+
+// Walk conducts a walk of the AST rooted at the given root using the
+// given function. It performs a "pre-order traversal", visiting a
+// given AST node before it visits that node's descendants.
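+//
+// For example, this sketch counts the string literals under a node:
+//
+//	count := 0
+//	Walk(root, func(n Node) (bool, VisitFunc) {
+//		if _, ok := n.(*StringLiteralNode); ok {
+//			count++
+//		}
+//		return true, nil
+//	})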
+func Walk(root Node, v VisitFunc) { + ok, next := v(root) + if !ok { + return + } + if next != nil { + v = next + } + if comp, ok := root.(CompositeNode); ok { + for _, child := range comp.Children() { + Walk(child, v) + } + } +} + +// Visitor provides a technique for walking the AST that allows for +// dynamic dispatch, where a particular function is invoked based on +// the runtime type of the argument. +// +// It consists of a number of functions, each of which matches a +// concrete Node type. It also includes functions for sub-interfaces +// of Node and the Node interface itself, to be used as broader +// "catch all" functions. +// +// To use a visitor, provide a function for the node types of +// interest and pass visitor.Visit as the function to a Walk operation. +// When a node is traversed, the corresponding function field of +// the visitor is invoked, if not nil. If the function for a node's +// concrete type is nil/absent but the function for an interface it +// implements is present, that interface visit function will be used +// instead. If no matching function is present, the traversal will +// continue. If a matching function is present, it will be invoked +// and its response determines how the traversal proceeds. +// +// Every visit function returns (bool, *Visitor). If the bool returned +// is false, the visited node's descendants are skipped. Otherwise, +// traversal will continue into the node's children. If the returned +// visitor is nil, the current visitor will continue to be used. But +// if a non-nil visitor is returned, it will be used to visit the +// node's children. +type Visitor struct { + // VisitFileNode is invoked when visiting a *FileNode in the AST. + VisitFileNode func(*FileNode) (bool, *Visitor) + // VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST. + VisitSyntaxNode func(*SyntaxNode) (bool, *Visitor) + // VisitPackageNode is invoked when visiting a *PackageNode in the AST. + VisitPackageNode func(*PackageNode) (bool, *Visitor) + // VisitImportNode is invoked when visiting an *ImportNode in the AST. + VisitImportNode func(*ImportNode) (bool, *Visitor) + // VisitOptionNode is invoked when visiting an *OptionNode in the AST. + VisitOptionNode func(*OptionNode) (bool, *Visitor) + // VisitOptionNameNode is invoked when visiting an *OptionNameNode in the AST. + VisitOptionNameNode func(*OptionNameNode) (bool, *Visitor) + // VisitFieldReferenceNode is invoked when visiting a *FieldReferenceNode in the AST. + VisitFieldReferenceNode func(*FieldReferenceNode) (bool, *Visitor) + // VisitCompactOptionsNode is invoked when visiting a *CompactOptionsNode in the AST. + VisitCompactOptionsNode func(*CompactOptionsNode) (bool, *Visitor) + // VisitMessageNode is invoked when visiting a *MessageNode in the AST. + VisitMessageNode func(*MessageNode) (bool, *Visitor) + // VisitExtendNode is invoked when visiting an *ExtendNode in the AST. + VisitExtendNode func(*ExtendNode) (bool, *Visitor) + // VisitExtensionRangeNode is invoked when visiting an *ExtensionRangeNode in the AST. + VisitExtensionRangeNode func(*ExtensionRangeNode) (bool, *Visitor) + // VisitReservedNode is invoked when visiting a *ReservedNode in the AST. + VisitReservedNode func(*ReservedNode) (bool, *Visitor) + // VisitRangeNode is invoked when visiting a *RangeNode in the AST. + VisitRangeNode func(*RangeNode) (bool, *Visitor) + // VisitFieldNode is invoked when visiting a *FieldNode in the AST. 
+ VisitFieldNode func(*FieldNode) (bool, *Visitor) + // VisitGroupNode is invoked when visiting a *GroupNode in the AST. + VisitGroupNode func(*GroupNode) (bool, *Visitor) + // VisitMapFieldNode is invoked when visiting a *MapFieldNode in the AST. + VisitMapFieldNode func(*MapFieldNode) (bool, *Visitor) + // VisitMapTypeNode is invoked when visiting a *MapTypeNode in the AST. + VisitMapTypeNode func(*MapTypeNode) (bool, *Visitor) + // VisitOneOfNode is invoked when visiting a *OneOfNode in the AST. + VisitOneOfNode func(*OneOfNode) (bool, *Visitor) + // VisitEnumNode is invoked when visiting an *EnumNode in the AST. + VisitEnumNode func(*EnumNode) (bool, *Visitor) + // VisitEnumValueNode is invoked when visiting an *EnumValueNode in the AST. + VisitEnumValueNode func(*EnumValueNode) (bool, *Visitor) + // VisitServiceNode is invoked when visiting a *ServiceNode in the AST. + VisitServiceNode func(*ServiceNode) (bool, *Visitor) + // VisitRPCNode is invoked when visiting an *RPCNode in the AST. + VisitRPCNode func(*RPCNode) (bool, *Visitor) + // VisitRPCTypeNode is invoked when visiting an *RPCTypeNode in the AST. + VisitRPCTypeNode func(*RPCTypeNode) (bool, *Visitor) + // VisitIdentNode is invoked when visiting an *IdentNode in the AST. + VisitIdentNode func(*IdentNode) (bool, *Visitor) + // VisitCompoundIdentNode is invoked when visiting a *CompoundIdentNode in the AST. + VisitCompoundIdentNode func(*CompoundIdentNode) (bool, *Visitor) + // VisitStringLiteralNode is invoked when visiting a *StringLiteralNode in the AST. + VisitStringLiteralNode func(*StringLiteralNode) (bool, *Visitor) + // VisitCompoundStringLiteralNode is invoked when visiting a *CompoundStringLiteralNode in the AST. + VisitCompoundStringLiteralNode func(*CompoundStringLiteralNode) (bool, *Visitor) + // VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST. + VisitUintLiteralNode func(*UintLiteralNode) (bool, *Visitor) + // VisitPositiveUintLiteralNode is invoked when visiting a *PositiveUintLiteralNode in the AST. + VisitPositiveUintLiteralNode func(*PositiveUintLiteralNode) (bool, *Visitor) + // VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST. + VisitNegativeIntLiteralNode func(*NegativeIntLiteralNode) (bool, *Visitor) + // VisitFloatLiteralNode is invoked when visiting a *FloatLiteralNode in the AST. + VisitFloatLiteralNode func(*FloatLiteralNode) (bool, *Visitor) + // VisitSpecialFloatLiteralNode is invoked when visiting a *SpecialFloatLiteralNode in the AST. + VisitSpecialFloatLiteralNode func(*SpecialFloatLiteralNode) (bool, *Visitor) + // VisitSignedFloatLiteralNode is invoked when visiting a *SignedFloatLiteralNode in the AST. + VisitSignedFloatLiteralNode func(*SignedFloatLiteralNode) (bool, *Visitor) + // VisitBoolLiteralNode is invoked when visiting a *BoolLiteralNode in the AST. + VisitBoolLiteralNode func(*BoolLiteralNode) (bool, *Visitor) + // VisitArrayLiteralNode is invoked when visiting an *ArrayLiteralNode in the AST. + VisitArrayLiteralNode func(*ArrayLiteralNode) (bool, *Visitor) + // VisitMessageLiteralNode is invoked when visiting a *MessageLiteralNode in the AST. + VisitMessageLiteralNode func(*MessageLiteralNode) (bool, *Visitor) + // VisitMessageFieldNode is invoked when visiting a *MessageFieldNode in the AST. + VisitMessageFieldNode func(*MessageFieldNode) (bool, *Visitor) + // VisitKeywordNode is invoked when visiting a *KeywordNode in the AST. 
+	VisitKeywordNode func(*KeywordNode) (bool, *Visitor)
+	// VisitRuneNode is invoked when visiting a *RuneNode in the AST.
+	VisitRuneNode func(*RuneNode) (bool, *Visitor)
+	// VisitEmptyDeclNode is invoked when visiting a *EmptyDeclNode in the AST.
+	VisitEmptyDeclNode func(*EmptyDeclNode) (bool, *Visitor)
+
+	// VisitFieldDeclNode is invoked when visiting a FieldDeclNode in the AST.
+	// This function is used when no concrete type function is provided. If
+	// both this and VisitMessageDeclNode are provided, and a node implements
+	// both (such as *GroupNode and *MapFieldNode), this function will be
+	// invoked and not the other.
+	VisitFieldDeclNode func(FieldDeclNode) (bool, *Visitor)
+	// VisitMessageDeclNode is invoked when visiting a MessageDeclNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitMessageDeclNode func(MessageDeclNode) (bool, *Visitor)
+
+	// VisitIdentValueNode is invoked when visiting an IdentValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitIdentValueNode func(IdentValueNode) (bool, *Visitor)
+	// VisitStringValueNode is invoked when visiting a StringValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitStringValueNode func(StringValueNode) (bool, *Visitor)
+	// VisitIntValueNode is invoked when visiting an IntValueNode in the AST.
+	// This function is used when no concrete type function is provided. If
+	// both this and VisitFloatValueNode are provided, and a node implements
+	// both (such as *UintLiteralNode), this function will be invoked and
+	// not the other.
+	VisitIntValueNode func(IntValueNode) (bool, *Visitor)
+	// VisitFloatValueNode is invoked when visiting a FloatValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitFloatValueNode func(FloatValueNode) (bool, *Visitor)
+	// VisitValueNode is invoked when visiting a ValueNode in the AST. This
+	// function is used when no concrete type function is provided and no
+	// more specific ValueNode function is provided that matches the node.
+	VisitValueNode func(ValueNode) (bool, *Visitor)
+
+	// VisitTerminalNode is invoked when visiting a TerminalNode in the AST.
+	// This function is used when no concrete type function is provided and
+	// no more specific interface type function is provided.
+	VisitTerminalNode func(TerminalNode) (bool, *Visitor)
+	// VisitCompositeNode is invoked when visiting a CompositeNode in the AST.
+	// This function is used when no concrete type function is provided and
+	// no more specific interface type function is provided.
+	VisitCompositeNode func(CompositeNode) (bool, *Visitor)
+	// VisitNode is invoked when visiting a Node in the AST. This
+	// function is only used when no other more specific function is
+	// provided.
+	VisitNode func(Node) (bool, *Visitor)
+}
+
+// Visit provides the Visitor's implementation of VisitFunc, to be
+// used with Walk operations.
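+//
+// For example, this sketch prints the name of every service declared in a
+// file (root is a hypothetical node obtained elsewhere, and s.Name.Val is
+// assumed to hold the identifier text):
+//
+//	v := &Visitor{
+//		VisitServiceNode: func(s *ServiceNode) (bool, *Visitor) {
+//			fmt.Println("service:", s.Name.Val)
+//			return true, nil
+//		},
+//	}
+//	Walk(root, v.Visit)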
+func (v *Visitor) Visit(n Node) (bool, VisitFunc) { + var ok, matched bool + var next *Visitor + switch n := n.(type) { + case *FileNode: + if v.VisitFileNode != nil { + matched = true + ok, next = v.VisitFileNode(n) + } + case *SyntaxNode: + if v.VisitSyntaxNode != nil { + matched = true + ok, next = v.VisitSyntaxNode(n) + } + case *PackageNode: + if v.VisitPackageNode != nil { + matched = true + ok, next = v.VisitPackageNode(n) + } + case *ImportNode: + if v.VisitImportNode != nil { + matched = true + ok, next = v.VisitImportNode(n) + } + case *OptionNode: + if v.VisitOptionNode != nil { + matched = true + ok, next = v.VisitOptionNode(n) + } + case *OptionNameNode: + if v.VisitOptionNameNode != nil { + matched = true + ok, next = v.VisitOptionNameNode(n) + } + case *FieldReferenceNode: + if v.VisitFieldReferenceNode != nil { + matched = true + ok, next = v.VisitFieldReferenceNode(n) + } + case *CompactOptionsNode: + if v.VisitCompactOptionsNode != nil { + matched = true + ok, next = v.VisitCompactOptionsNode(n) + } + case *MessageNode: + if v.VisitMessageNode != nil { + matched = true + ok, next = v.VisitMessageNode(n) + } + case *ExtendNode: + if v.VisitExtendNode != nil { + matched = true + ok, next = v.VisitExtendNode(n) + } + case *ExtensionRangeNode: + if v.VisitExtensionRangeNode != nil { + matched = true + ok, next = v.VisitExtensionRangeNode(n) + } + case *ReservedNode: + if v.VisitReservedNode != nil { + matched = true + ok, next = v.VisitReservedNode(n) + } + case *RangeNode: + if v.VisitRangeNode != nil { + matched = true + ok, next = v.VisitRangeNode(n) + } + case *FieldNode: + if v.VisitFieldNode != nil { + matched = true + ok, next = v.VisitFieldNode(n) + } + case *GroupNode: + if v.VisitGroupNode != nil { + matched = true + ok, next = v.VisitGroupNode(n) + } + case *MapFieldNode: + if v.VisitMapFieldNode != nil { + matched = true + ok, next = v.VisitMapFieldNode(n) + } + case *MapTypeNode: + if v.VisitMapTypeNode != nil { + matched = true + ok, next = v.VisitMapTypeNode(n) + } + case *OneOfNode: + if v.VisitOneOfNode != nil { + matched = true + ok, next = v.VisitOneOfNode(n) + } + case *EnumNode: + if v.VisitEnumNode != nil { + matched = true + ok, next = v.VisitEnumNode(n) + } + case *EnumValueNode: + if v.VisitEnumValueNode != nil { + matched = true + ok, next = v.VisitEnumValueNode(n) + } + case *ServiceNode: + if v.VisitServiceNode != nil { + matched = true + ok, next = v.VisitServiceNode(n) + } + case *RPCNode: + if v.VisitRPCNode != nil { + matched = true + ok, next = v.VisitRPCNode(n) + } + case *RPCTypeNode: + if v.VisitRPCTypeNode != nil { + matched = true + ok, next = v.VisitRPCTypeNode(n) + } + case *IdentNode: + if v.VisitIdentNode != nil { + matched = true + ok, next = v.VisitIdentNode(n) + } + case *CompoundIdentNode: + if v.VisitCompoundIdentNode != nil { + matched = true + ok, next = v.VisitCompoundIdentNode(n) + } + case *StringLiteralNode: + if v.VisitStringLiteralNode != nil { + matched = true + ok, next = v.VisitStringLiteralNode(n) + } + case *CompoundStringLiteralNode: + if v.VisitCompoundStringLiteralNode != nil { + matched = true + ok, next = v.VisitCompoundStringLiteralNode(n) + } + case *UintLiteralNode: + if v.VisitUintLiteralNode != nil { + matched = true + ok, next = v.VisitUintLiteralNode(n) + } + case *PositiveUintLiteralNode: + if v.VisitPositiveUintLiteralNode != nil { + matched = true + ok, next = v.VisitPositiveUintLiteralNode(n) + } + case *NegativeIntLiteralNode: + if v.VisitNegativeIntLiteralNode != nil { + matched = true + ok, next = 
v.VisitNegativeIntLiteralNode(n) + } + case *FloatLiteralNode: + if v.VisitFloatLiteralNode != nil { + matched = true + ok, next = v.VisitFloatLiteralNode(n) + } + case *SpecialFloatLiteralNode: + if v.VisitSpecialFloatLiteralNode != nil { + matched = true + ok, next = v.VisitSpecialFloatLiteralNode(n) + } + case *SignedFloatLiteralNode: + if v.VisitSignedFloatLiteralNode != nil { + matched = true + ok, next = v.VisitSignedFloatLiteralNode(n) + } + case *BoolLiteralNode: + if v.VisitBoolLiteralNode != nil { + matched = true + ok, next = v.VisitBoolLiteralNode(n) + } + case *ArrayLiteralNode: + if v.VisitArrayLiteralNode != nil { + matched = true + ok, next = v.VisitArrayLiteralNode(n) + } + case *MessageLiteralNode: + if v.VisitMessageLiteralNode != nil { + matched = true + ok, next = v.VisitMessageLiteralNode(n) + } + case *MessageFieldNode: + if v.VisitMessageFieldNode != nil { + matched = true + ok, next = v.VisitMessageFieldNode(n) + } + case *KeywordNode: + if v.VisitKeywordNode != nil { + matched = true + ok, next = v.VisitKeywordNode(n) + } + case *RuneNode: + if v.VisitRuneNode != nil { + matched = true + ok, next = v.VisitRuneNode(n) + } + case *EmptyDeclNode: + if v.VisitEmptyDeclNode != nil { + matched = true + ok, next = v.VisitEmptyDeclNode(n) + } + } + + if !matched { + // Visitor provided no concrete type visit function, so + // check interface types. We do this in several passes + // to provide "priority" for matched interfaces for nodes + // that actually implement more than one interface. + // + // For example, StringLiteralNode implements both + // StringValueNode and ValueNode. Both cases could match + // so the first case is what would match. So if we want + // to test against either, they need to be in different + // switch statements. + switch n := n.(type) { + case FieldDeclNode: + if v.VisitFieldDeclNode != nil { + matched = true + ok, next = v.VisitFieldDeclNode(n) + } + case IdentValueNode: + if v.VisitIdentValueNode != nil { + matched = true + ok, next = v.VisitIdentValueNode(n) + } + case StringValueNode: + if v.VisitStringValueNode != nil { + matched = true + ok, next = v.VisitStringValueNode(n) + } + case IntValueNode: + if v.VisitIntValueNode != nil { + matched = true + ok, next = v.VisitIntValueNode(n) + } + } + } + + if !matched { + // These two are excluded from the above switch so that + // if visitor provides both VisitIntValueNode and + // VisitFloatValueNode, we'll prefer VisitIntValueNode + // for *UintLiteralNode (which implements both). Similarly, + // that way we prefer VisitFieldDeclNode over + // VisitMessageDeclNode when visiting a *GroupNode. 
+ switch n := n.(type) { + case FloatValueNode: + if v.VisitFloatValueNode != nil { + matched = true + ok, next = v.VisitFloatValueNode(n) + } + case MessageDeclNode: + if v.VisitMessageDeclNode != nil { + matched = true + ok, next = v.VisitMessageDeclNode(n) + } + } + } + + if !matched { + switch n := n.(type) { + case ValueNode: + if v.VisitValueNode != nil { + matched = true + ok, next = v.VisitValueNode(n) + } + } + } + + if !matched { + switch n := n.(type) { + case TerminalNode: + if v.VisitTerminalNode != nil { + matched = true + ok, next = v.VisitTerminalNode(n) + } + case CompositeNode: + if v.VisitCompositeNode != nil { + matched = true + ok, next = v.VisitCompositeNode(n) + } + } + } + + if !matched { + // finally, fallback to most generic visit function + if v.VisitNode != nil { + matched = true + ok, next = v.VisitNode(n) + } + } + + if !matched { + // keep descending with the current visitor + return true, nil + } + + if !ok { + return false, nil + } + if next != nil { + return true, next.Visit + } + return true, v.Visit +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go new file mode 100644 index 00000000..c6446d34 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go @@ -0,0 +1,10 @@ +// Package protoparse provides functionality for parsing *.proto source files +// into descriptors that can be used with other protoreflect packages, like +// dynamic messages and dynamic GRPC clients. +// +// This package links in other packages that include compiled descriptors for +// the various "google/protobuf/*.proto" files that are included with protoc. +// That way, like when invoking protoc, programs need not supply copies of these +// "builtin" files. Though if copies of the files are provided, they will be +// used instead of the builtin descriptors. +package protoparse diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go new file mode 100644 index 00000000..c71d651d --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go @@ -0,0 +1,117 @@ +package protoparse + +import ( + "errors" + "fmt" + + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/reporter" + + "github.com/jhump/protoreflect/desc/protoparse/ast" +) + +// SourcePos is the same as ast.SourcePos. This alias exists for +// backwards compatibility (SourcePos used to be defined in this package.) +type SourcePos = ast.SourcePos + +// ErrInvalidSource is a sentinel error that is returned by calls to +// Parser.ParseFiles and Parser.ParseFilesButDoNotLink in the event that syntax +// or link errors are encountered, but the parser's configured ErrorReporter +// always returns nil. +var ErrInvalidSource = reporter.ErrInvalidSource + +// ErrNoSyntax is a sentinel error that may be passed to a warning reporter. +// The error the reporter receives will be wrapped with source position that +// indicates the file that had no syntax statement. +var ErrNoSyntax = parser.ErrNoSyntax + +// ErrLookupImportAndProtoSet is the error returned if both LookupImport and LookupImportProto are set. +// +// Deprecated: This error is no longer used. It is now legal to set both LookupImport and LookupImportProto +// fields on the Parser. 
+var ErrLookupImportAndProtoSet = errors.New("both LookupImport and LookupImportProto set") + +// ErrorReporter is responsible for reporting the given error. If the reporter +// returns a non-nil error, parsing/linking will abort with that error. If the +// reporter returns nil, parsing will continue, allowing the parser to try to +// report as many syntax and/or link errors as it can find. +type ErrorReporter = reporter.ErrorReporter + +// WarningReporter is responsible for reporting the given warning. This is used +// for indicating non-error messages to the calling program for things that do +// not cause the parse to fail but are considered bad practice. Though they are +// just warnings, the details are supplied to the reporter via an error type. +type WarningReporter = reporter.WarningReporter + +// ErrorWithPos is an error about a proto source file that includes information +// about the location in the file that caused the error. +// +// The value of Error() will contain both the SourcePos and Underlying error. +// The value of Unwrap() will only be the Underlying error. +type ErrorWithPos = reporter.ErrorWithPos + +// ErrorWithSourcePos is an error about a proto source file that includes +// information about the location in the file that caused the error. +// +// Errors that include source location information *might* be of this type. +// However, calling code that is trying to examine errors with location info +// should instead look for instances of the ErrorWithPos interface, which +// will find other kinds of errors. This type is only exported for backwards +// compatibility. +// +// SourcePos should always be set and never nil. +type ErrorWithSourcePos struct { + Underlying error + Pos *SourcePos +} + +// Error implements the error interface +func (e ErrorWithSourcePos) Error() string { + sourcePos := e.GetPosition() + return fmt.Sprintf("%s: %v", sourcePos, e.Underlying) +} + +// GetPosition implements the ErrorWithPos interface, supplying a location in +// proto source that caused the error. +func (e ErrorWithSourcePos) GetPosition() SourcePos { + if e.Pos == nil { + return SourcePos{Filename: ""} + } + return *e.Pos +} + +// Unwrap implements the ErrorWithPos interface, supplying the underlying +// error. This error will not include location information. +func (e ErrorWithSourcePos) Unwrap() error { + return e.Underlying +} + +var _ ErrorWithPos = ErrorWithSourcePos{} + +func toErrorWithSourcePos(err ErrorWithPos) ErrorWithPos { + pos := err.GetPosition() + return ErrorWithSourcePos{ + Underlying: err.Unwrap(), + Pos: &pos, + } +} + +// ErrorUnusedImport may be passed to a warning reporter when an unused +// import is detected. The error the reporter receives will be wrapped +// with source position that indicates the file and line where the import +// statement appeared. 
+type ErrorUnusedImport = linker.ErrorUnusedImport + +type errorWithFilename struct { + underlying error + filename string +} + +func (e errorWithFilename) Error() string { + return fmt.Sprintf("%s: %v", e.filename, e.underlying) +} + +func (e errorWithFilename) Unwrap() error { + return e.underlying +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go new file mode 100644 index 00000000..a1312d11 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go @@ -0,0 +1,680 @@ +package protoparse + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/bufbuild/protocompile" + ast2 "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/options" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/protoutil" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/sourceinfo" + "github.com/bufbuild/protocompile/walk" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/desc/protoparse/ast" +) + +// FileAccessor is an abstraction for opening proto source files. It takes the +// name of the file to open and returns either the input reader or an error. +type FileAccessor func(filename string) (io.ReadCloser, error) + +// FileContentsFromMap returns a FileAccessor that uses the given map of file +// contents. This allows proto source files to be constructed in memory and +// easily supplied to a parser. The map keys are the paths to the proto source +// files, and the values are the actual proto source contents. +func FileContentsFromMap(files map[string]string) FileAccessor { + return func(filename string) (io.ReadCloser, error) { + contents, ok := files[filename] + if !ok { + return nil, os.ErrNotExist + } + return ioutil.NopCloser(strings.NewReader(contents)), nil + } +} + +// Parser parses proto source into descriptors. +type Parser struct { + // The paths used to search for dependencies that are referenced in import + // statements in proto source files. If no import paths are provided then + // "." (current directory) is assumed to be the only import path. + // + // This setting is only used during ParseFiles operations. Since calls to + // ParseFilesButDoNotLink do not link, there is no need to load and parse + // dependencies. + ImportPaths []string + + // If true, the supplied file names/paths need not necessarily match how the + // files are referenced in import statements. The parser will attempt to + // match import statements to supplied paths, "guessing" the import paths + // for the files. Note that this inference is not perfect and link errors + // could result. It works best when all proto files are organized such that + // a single import path can be inferred (e.g. all files under a single tree + // with import statements all being relative to the root of this tree). + InferImportPaths bool + + // LookupImport is a function that accepts a filename and + // returns a file descriptor, which will be consulted when resolving imports. + // This allows a compiled Go proto in another Go module to be referenced + // in the proto(s) being parsed. 
+	//
+	// In the event of a filename collision, Accessor is consulted first,
+	// then LookupImport is consulted, and finally the well-known protos
+	// are used.
+	//
+	// For example, in order to automatically look up compiled Go protos that
+	// have been imported and be able to use them as imports, set this to
+	// desc.LoadFileDescriptor.
+	LookupImport func(string) (*desc.FileDescriptor, error)
+
+	// LookupImportProto has the same functionality as LookupImport; however, it
+	// returns a FileDescriptorProto instead of a FileDescriptor.
+	LookupImportProto func(string) (*descriptorpb.FileDescriptorProto, error)
+
+	// Used to create a reader for a given filename, when loading proto source
+	// file contents. If unset, os.Open is used. If ImportPaths is also empty
+	// then relative paths will be resolved relative to the process's current
+	// working directory.
+	Accessor FileAccessor
+
+	// If true, the resulting file descriptors will retain source code info,
+	// which maps elements to their location in the source files as well as
+	// includes comments found during parsing (and attributed to elements of
+	// the source file).
+	IncludeSourceCodeInfo bool
+
+	// If true, the results from ParseFilesButDoNotLink will be passed through
+	// some additional validations. But only constraints that do not require
+	// linking can be checked. These include proto2 vs. proto3 language features,
+	// looking for incorrect usage of reserved names or tags, and ensuring that
+	// fields have unique tags and that enum values have unique numbers (unless
+	// the enum allows aliases).
+	ValidateUnlinkedFiles bool
+
+	// If true, the results from ParseFilesButDoNotLink will have options
+	// interpreted. Any uninterpretable options (including any custom options or
+	// options that refer to message and enum types, which can only be
+	// interpreted after linking) will be left in uninterpreted_options. Also,
+	// the "default" pseudo-option for fields can only be interpreted for scalar
+	// fields, excluding enums. (Interpreting default values for enum fields
+	// requires resolving enum names, which requires linking.)
+	InterpretOptionsInUnlinkedFiles bool
+
+	// A custom reporter of syntax and link errors. If not specified, the
+	// default reporter just returns the reported error, which causes parsing
+	// to abort after encountering a single error.
+	//
+	// The reporter is not invoked for system or I/O errors, only for syntax and
+	// link errors.
+	ErrorReporter ErrorReporter
+
+	// A custom reporter of warnings. If not specified, warning messages are ignored.
+	WarningReporter WarningReporter
+}
+
+// ParseFiles parses the named files into descriptors. The returned slice has
+// the same number of entries as the given filenames, in the same order. So the
+// first returned descriptor corresponds to the first given name, and so on.
+//
+// All dependencies for all specified files (including transitive dependencies)
+// must be accessible via the parser's Accessor or a link error will occur. The
+// exception to this rule is that files can import standard Google-provided
+// files -- e.g. google/protobuf/*.proto -- without needing to supply sources
+// for these files. Like protoc, this parser has a built-in version of these
+// files it can use if they aren't explicitly supplied.
+//
+// If the Parser has no ErrorReporter set and a syntax or link error occurs,
+// parsing will abort with the first such error encountered.
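+//
+// An illustrative sketch (not upstream documentation): given a Parser value p,
+// a reporter that records every error and returns nil lets parsing continue so
+// all problems are collected (names here are illustrative):
+//
+//	var all []error
+//	p.ErrorReporter = func(err ErrorWithPos) error {
+//		all = append(all, err)
+//		return nil
+//	}
+//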
+// If there is an ErrorReporter configured and it returns non-nil, parsing will
+// abort with the error it returns. If syntax or link errors are encountered but
+// the configured ErrorReporter always returns nil, the parse fails with
+// ErrInvalidSource.
+func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) {
+	srcInfoMode := protocompile.SourceInfoNone
+	if p.IncludeSourceCodeInfo {
+		srcInfoMode = protocompile.SourceInfoExtraComments
+	}
+	rep := newReporter(p.ErrorReporter, p.WarningReporter)
+	res, srcPosAddr := p.getResolver(filenames)
+
+	if p.InferImportPaths {
+		// we must first compile everything to protos
+		results, err := parseToProtosRecursive(res, filenames, reporter.NewHandler(rep), srcPosAddr)
+		if err != nil {
+			return nil, err
+		}
+		// then we can infer import paths
+		// TODO: if this re-writes one of the names in filenames, lookups below will break
+		results = fixupFilenames(results)
+		resolverFromResults := protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) {
+			res, ok := results[path]
+			if !ok {
+				return protocompile.SearchResult{}, os.ErrNotExist
+			}
+			return protocompile.SearchResult{ParseResult: noCloneParseResult{res}}, nil
+		})
+		res = protocompile.CompositeResolver{resolverFromResults, res}
+	}
+
+	c := protocompile.Compiler{
+		Resolver:       res,
+		MaxParallelism: 1,
+		SourceInfoMode: srcInfoMode,
+		Reporter:       rep,
+	}
+	results, err := c.Compile(context.Background(), filenames...)
+	if err != nil {
+		return nil, err
+	}
+
+	fds := make([]protoreflect.FileDescriptor, len(results))
+	for i, res := range results {
+		if linkRes, ok := res.(linker.Result); ok {
+			removeDynamicExtensions(linkRes.FileDescriptorProto())
+		}
+		fds[i] = results[i]
+	}
+	return desc.WrapFiles(fds)
+}
+
+type noCloneParseResult struct {
+	parser.Result
+}
+
+func (r noCloneParseResult) Clone() parser.Result {
+	// protocompile will clone parser.Result to make sure it can't be shared
+	// with other compilation operations (which would not be thread-safe).
+	// However, this parse result cannot be shared with another compile
+	// operation. That means the clone is unnecessary; so we skip it, to avoid
+	// the associated performance costs.
+	return r.Result
+}
+
+// ParseFilesButDoNotLink parses the named files into descriptor protos. The
+// results are just protos, not fully-linked descriptors. Descriptors may be
+// invalid yet still be returned in parsed form without error, because the
+// linking step is skipped (and thus many validation steps omitted).
+//
+// There are a few side effects to not linking the descriptors:
+// 1. No options will be interpreted. Options can refer to extensions or have
+//    message and enum types. Without linking, these extension and type
+//    references are not resolved, so the options may not be interpretable.
+//    So all options will appear in UninterpretedOption fields of the various
+//    descriptor options messages.
+// 2. Type references will not be resolved. This means that the actual type
+//    names in the descriptors may be unqualified and even relative to the
+//    scope in which the type reference appears. This goes for fields that
+//    have message and enum types. It also applies to methods and their
+//    references to request and response message types.
+// 3. Type references are not known. For non-scalar fields, until the type
+//    name is resolved (during linking), it is not known whether the type
+//    refers to a message or an enum.
+//    So all fields with such type references will not have their Type set,
+//    only the TypeName.
+//
+// This method will still validate the syntax of parsed files. If the parser's
+// ValidateUnlinkedFiles field is true, additional checks, beyond syntax, will
+// also be performed.
+//
+// If the Parser has no ErrorReporter set and a syntax error occurs, parsing
+// will abort with the first such error encountered. If there is an
+// ErrorReporter configured and it returns non-nil, parsing will abort with the
+// error it returns. If syntax errors are encountered but the configured
+// ErrorReporter always returns nil, the parse fails with ErrInvalidSource.
+func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*descriptorpb.FileDescriptorProto, error) {
+	rep := newReporter(p.ErrorReporter, p.WarningReporter)
+	res, _ := p.getResolver(filenames)
+	results, err := parseToProtos(res, filenames, reporter.NewHandler(rep), p.ValidateUnlinkedFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	if p.InferImportPaths {
+		resultsMap := make(map[string]parser.Result, len(results))
+		for _, res := range results {
+			resultsMap[res.FileDescriptorProto().GetName()] = res
+		}
+		resultsMap = fixupFilenames(resultsMap)
+		for i := range filenames {
+			results[i] = resultsMap[filenames[i]]
+		}
+	}
+
+	protos := make([]*descriptorpb.FileDescriptorProto, len(results))
+	for i, res := range results {
+		protos[i] = res.FileDescriptorProto()
+		var optsIndex options.Index
+		if p.InterpretOptionsInUnlinkedFiles {
+			var err error
+			optsIndex, err = options.InterpretUnlinkedOptions(res)
+			if err != nil {
+				return nil, err
+			}
+			removeDynamicExtensions(protos[i])
+		}
+		if p.IncludeSourceCodeInfo {
+			protos[i].SourceCodeInfo = sourceinfo.GenerateSourceInfo(res.AST(), optsIndex)
+		}
+	}
+
+	return protos, nil
+}
+
+// ParseToAST parses the named files into ASTs, or Abstract Syntax Trees. This
+// is for consumers of proto files that don't care about compiling the files to
+// descriptors, but care deeply about a non-lossy structured representation of
+// the source (since descriptors are lossy). This includes formatting tools and
+// possibly linters, too.
+//
+// If the requested filenames include standard imports (such as
+// "google/protobuf/empty.proto") and no source is provided, the corresponding
+// AST in the returned slice will be nil. These standard imports are only
+// available for use as descriptors; no source is available unless it is
+// provided by the configured Accessor.
+//
+// If the Parser has no ErrorReporter set and a syntax error occurs, parsing
+// will abort with the first such error encountered. If there is an
+// ErrorReporter configured and it returns non-nil, parsing will abort with the
+// error it returns. If syntax errors are encountered but the configured
+// ErrorReporter always returns nil, the parse fails with ErrInvalidSource.
+func (p Parser) ParseToAST(filenames ...string) ([]*ast.FileNode, error) {
+	rep := newReporter(p.ErrorReporter, p.WarningReporter)
+	res, _ := p.getResolver(filenames)
+	asts, _, err := parseToASTs(res, filenames, reporter.NewHandler(rep))
+	if err != nil {
+		return nil, err
+	}
+	results := make([]*ast.FileNode, len(asts))
+	for i := range asts {
+		if asts[i] == nil {
+			// should not be possible but...
+ return nil, fmt.Errorf("resolver did not produce source for %v", filenames[i]) + } + results[i] = convertAST(asts[i]) + } + return results, nil +} + +func parseToAST(res protocompile.Resolver, filename string, rep *reporter.Handler) (*ast2.FileNode, parser.Result, error) { + searchResult, err := res.FindFileByPath(filename) + if err != nil { + _ = rep.HandleError(err) + return nil, nil, rep.Error() + } + switch { + case searchResult.ParseResult != nil: + return nil, searchResult.ParseResult, nil + case searchResult.Proto != nil: + return nil, parser.ResultWithoutAST(searchResult.Proto), nil + case searchResult.Desc != nil: + return nil, parser.ResultWithoutAST(protoutil.ProtoFromFileDescriptor(searchResult.Desc)), nil + case searchResult.AST != nil: + return searchResult.AST, nil, nil + case searchResult.Source != nil: + astRoot, err := parser.Parse(filename, searchResult.Source, rep) + return astRoot, nil, err + default: + _ = rep.HandleError(fmt.Errorf("resolver did not produce a result for %v", filename)) + return nil, nil, rep.Error() + } +} + +func parseToASTs(res protocompile.Resolver, filenames []string, rep *reporter.Handler) ([]*ast2.FileNode, []parser.Result, error) { + asts := make([]*ast2.FileNode, len(filenames)) + results := make([]parser.Result, len(filenames)) + for i := range filenames { + asts[i], results[i], _ = parseToAST(res, filenames[i], rep) + if rep.ReporterError() != nil { + break + } + } + return asts, results, rep.Error() +} + +func parseToProtos(res protocompile.Resolver, filenames []string, rep *reporter.Handler, validate bool) ([]parser.Result, error) { + asts, results, err := parseToASTs(res, filenames, rep) + if err != nil { + return nil, err + } + for i := range results { + if results[i] != nil { + continue + } + var err error + results[i], err = parser.ResultFromAST(asts[i], validate, rep) + if err != nil { + return nil, err + } + } + return results, nil +} + +func parseToProtosRecursive(res protocompile.Resolver, filenames []string, rep *reporter.Handler, srcPosAddr *SourcePos) (map[string]parser.Result, error) { + results := make(map[string]parser.Result, len(filenames)) + for _, filename := range filenames { + parseToProtoRecursive(res, filename, rep, srcPosAddr, results) + } + return results, rep.Error() +} + +func parseToProtoRecursive(res protocompile.Resolver, filename string, rep *reporter.Handler, srcPosAddr *SourcePos, results map[string]parser.Result) { + if _, ok := results[filename]; ok { + // already processed this one + return + } + results[filename] = nil // placeholder entry + + astRoot, parseResult, _ := parseToAST(res, filename, rep) + if rep.ReporterError() != nil { + return + } + if parseResult == nil { + parseResult, _ = parser.ResultFromAST(astRoot, true, rep) + if rep.ReporterError() != nil { + return + } + } + results[filename] = parseResult + + for _, decl := range astRoot.Decls { + imp, ok := decl.(*ast2.ImportNode) + if !ok { + continue + } + func() { + orig := *srcPosAddr + *srcPosAddr = astRoot.NodeInfo(imp.Name).Start() + defer func() { + *srcPosAddr = orig + }() + + parseToProtoRecursive(res, imp.Name.AsString(), rep, srcPosAddr, results) + }() + if rep.ReporterError() != nil { + return + } + } +} + +func newReporter(errRep ErrorReporter, warnRep WarningReporter) reporter.Reporter { + if errRep != nil { + delegate := errRep + errRep = func(err ErrorWithPos) error { + if _, ok := err.(ErrorWithSourcePos); !ok { + err = toErrorWithSourcePos(err) + } + return delegate(err) + } + } + if warnRep != nil { + delegate := warnRep 
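+		// wrap the caller's reporter so it always receives concrete
+		// ErrorWithSourcePos values, mirroring the error-reporter wrapping above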
+ warnRep = func(err ErrorWithPos) { + if _, ok := err.(ErrorWithSourcePos); !ok { + err = toErrorWithSourcePos(err) + } + delegate(err) + } + } + return reporter.NewReporter(errRep, warnRep) +} + +func (p Parser) getResolver(filenames []string) (protocompile.Resolver, *SourcePos) { + var srcPos SourcePos + accessor := p.Accessor + if accessor == nil { + accessor = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + } + sourceResolver := &protocompile.SourceResolver{ + Accessor: func(filename string) (io.ReadCloser, error) { + in, err := accessor(filename) + if err != nil { + if !strings.Contains(err.Error(), filename) { + // errors that don't include the filename that failed are no bueno + err = errorWithFilename{filename: filename, underlying: err} + } + if srcPos.Filename != "" { + err = reporter.Error(srcPos, err) + } + } + return in, err + }, + ImportPaths: p.ImportPaths, + } + var importResolver protocompile.CompositeResolver + if p.LookupImport != nil { + importResolver = append(importResolver, protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + fd, err := p.LookupImport(path) + if err != nil { + return protocompile.SearchResult{}, err + } + return protocompile.SearchResult{Desc: fd.UnwrapFile()}, nil + })) + } + if p.LookupImportProto != nil { + importResolver = append(importResolver, protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + fd, err := p.LookupImportProto(path) + if err != nil { + return protocompile.SearchResult{}, err + } + return protocompile.SearchResult{Proto: fd}, nil + })) + } + backupResolver := protocompile.WithStandardImports(importResolver) + mustBeSource := make(map[string]struct{}, len(filenames)) + for _, name := range filenames { + mustBeSource[name] = struct{}{} + } + return protocompile.CompositeResolver{ + sourceResolver, + protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + if _, ok := mustBeSource[path]; ok { + return protocompile.SearchResult{}, os.ErrNotExist + } + return backupResolver.FindFileByPath(path) + }), + }, &srcPos +} + +func fixupFilenames(protos map[string]parser.Result) map[string]parser.Result { + // In the event that the given filenames (keys in the supplied map) do not + // match the actual paths used in 'import' statements in the files, we try + // to revise names in the protos so that they will match and be linkable. + revisedProtos := map[string]parser.Result{} + + protoPaths := map[string]struct{}{} + // TODO: this is O(n^2) but could likely be O(n) with a clever data structure (prefix tree that is indexed backwards?) + importCandidates := map[string]map[string]struct{}{} + candidatesAvailable := map[string]struct{}{} + for name := range protos { + candidatesAvailable[name] = struct{}{} + for _, f := range protos { + for _, imp := range f.FileDescriptorProto().Dependency { + if strings.HasSuffix(name, imp) { + candidates := importCandidates[imp] + if candidates == nil { + candidates = map[string]struct{}{} + importCandidates[imp] = candidates + } + candidates[name] = struct{}{} + } + } + } + } + for imp, candidates := range importCandidates { + // if we found multiple possible candidates, use the one that is an exact match + // if it exists, and otherwise, guess that it's the shortest path (fewest elements) + var best string + for c := range candidates { + if _, ok := candidatesAvailable[c]; !ok { + // already used this candidate and re-written its filename accordingly + continue + } + if c == imp { + // exact match! 
+				best = c
+				break
+			}
+			if best == "" {
+				best = c
+			} else {
+				// HACK: we can't actually tell which file is supposed to match
+				// this import, so arbitrarily pick the "shorter" one (fewest
+				// path elements) or, on a tie, the lexically earlier one
+				minLen := strings.Count(best, string(filepath.Separator))
+				cLen := strings.Count(c, string(filepath.Separator))
+				if cLen < minLen || (cLen == minLen && c < best) {
+					best = c
+				}
+			}
+		}
+		if best != "" {
+			prefix := best[:len(best)-len(imp)]
+			if len(prefix) > 0 {
+				protoPaths[prefix] = struct{}{}
+			}
+			f := protos[best]
+			f.FileDescriptorProto().Name = proto.String(imp)
+			revisedProtos[imp] = f
+			delete(candidatesAvailable, best)
+		}
+	}
+
+	if len(candidatesAvailable) == 0 {
+		return revisedProtos
+	}
+
+	if len(protoPaths) == 0 {
+		for c := range candidatesAvailable {
+			revisedProtos[c] = protos[c]
+		}
+		return revisedProtos
+	}
+
+	// Any remaining candidates are entry-points (not imported by others), so
+	// the best bet for "fixing" their file names is to see if they're in one
+	// of the proto paths we found, and if so strip that prefix.
+	protoPathStrs := make([]string, len(protoPaths))
+	i := 0
+	for p := range protoPaths {
+		protoPathStrs[i] = p
+		i++
+	}
+	sort.Strings(protoPathStrs)
+	// we look at paths in reverse order, so we'll use a longer proto path if
+	// there is more than one match
+	for c := range candidatesAvailable {
+		var imp string
+		for i := len(protoPathStrs) - 1; i >= 0; i-- {
+			p := protoPathStrs[i]
+			if strings.HasPrefix(c, p) {
+				imp = c[len(p):]
+				break
+			}
+		}
+		if imp != "" {
+			f := protos[c]
+			f.FileDescriptorProto().Name = proto.String(imp)
+			revisedProtos[imp] = f
+		} else {
+			revisedProtos[c] = protos[c]
+		}
+	}
+
+	return revisedProtos
+}
+
+func removeDynamicExtensions(fd *descriptorpb.FileDescriptorProto) {
+	// protocompile returns descriptors with dynamic extension fields for custom options.
+	// But protoparse only used known custom options and everything else defined in the
+	// sources would be stored as unrecognized fields. So to bridge the difference in
+	// behavior, we need to remove custom options from the given file and add them back
+	// via serializing-then-de-serializing them back into the options messages. That way,
+	// statically known options will be properly typed and others will be unrecognized.
+	//
+	// This is best effort. So if an error occurs, we'll still return a result, but it
+	// may include a dynamic extension.
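+	// Fix up the file-level options first; the walk below then does the same
+	// for the options of every message, field, extension range, oneof, enum,
+	// enum value, service, and method in the file.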
+ fd.Options = removeDynamicExtensionsFromOptions(fd.Options) + _ = walk.DescriptorProtos(fd, func(_ protoreflect.FullName, msg proto.Message) error { + switch msg := msg.(type) { + case *descriptorpb.DescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + for _, extr := range msg.ExtensionRange { + extr.Options = removeDynamicExtensionsFromOptions(extr.Options) + } + case *descriptorpb.FieldDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.OneofDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.EnumDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.EnumValueDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.ServiceDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.MethodDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + } + return nil + }) +} + +type ptrMsg[T any] interface { + *T + proto.Message +} + +type fieldValue struct { + fd protoreflect.FieldDescriptor + val protoreflect.Value +} + +func removeDynamicExtensionsFromOptions[O ptrMsg[T], T any](opts O) O { + if opts == nil { + return nil + } + var dynamicExtensions []fieldValue + opts.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + if fd.IsExtension() { + dynamicExtensions = append(dynamicExtensions, fieldValue{fd: fd, val: val}) + } + return true + }) + + // serialize only these custom options + optsWithOnlyDyn := opts.ProtoReflect().Type().New() + for _, fv := range dynamicExtensions { + optsWithOnlyDyn.Set(fv.fd, fv.val) + } + data, err := proto.MarshalOptions{AllowPartial: true}.Marshal(optsWithOnlyDyn.Interface()) + if err != nil { + // oh, well... can't fix this one + return opts + } + + // and then replace values by clearing these custom options and deserializing + optsClone := proto.Clone(opts).ProtoReflect() + for _, fv := range dynamicExtensions { + optsClone.Clear(fv.fd) + } + err = proto.UnmarshalOptions{AllowPartial: true, Merge: true}.Unmarshal(data, optsClone.Interface()) + if err != nil { + // bummer, can't fix this one + return opts + } + + return optsClone.Interface().(O) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go new file mode 100644 index 00000000..3ae1415a --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go @@ -0,0 +1,175 @@ +package protoparse + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" +) + +var errNoImportPathsForAbsoluteFilePath = errors.New("must specify at least one import path if any absolute file paths are given") + +// ResolveFilenames tries to resolve fileNames into paths that are relative to +// directories in the given importPaths. The returned slice has the results in +// the same order as they are supplied in fileNames. +// +// The resulting names should be suitable for passing to Parser.ParseFiles. +// +// If no import paths are given and any file name is absolute, this returns an +// error. If no import paths are given and all file names are relative, this +// returns the original file names. If a file name is already relative to one +// of the given import paths, it will be unchanged in the returned slice. 
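+//
+// An illustrative call (not upstream documentation, assuming a file
+// "protos/foo/bar.proto" exists relative to the working directory):
+//
+//	names, err := ResolveFilenames([]string{"protos"}, "protos/foo/bar.proto")
+//	// on success, names == []string{"foo/bar.proto"}
+//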
+// If a given file name is relative to the current working directory, it will
+// be made relative to one of the given import paths; but if it cannot be made
+// relative (due to no matching import path), an error will be returned.
+func ResolveFilenames(importPaths []string, fileNames ...string) ([]string, error) {
+	if len(importPaths) == 0 {
+		if containsAbsFilePath(fileNames) {
+			// We have to do this as otherwise parseProtoFiles can result in duplicate symbols.
+			// For example, assume we import "foo/bar/bar.proto" in a file "/home/alice/dev/foo/bar/baz.proto"
+			// as we call ParseFiles("/home/alice/dev/foo/bar/bar.proto","/home/alice/dev/foo/bar/baz.proto")
+			// with "/home/alice/dev" as our current directory. Due to the recursive nature of parseProtoFiles,
+			// it will discover the import "foo/bar/bar.proto" in the input file, and call parse on this,
+			// adding "foo/bar/bar.proto" to the parsed results, as well as "/home/alice/dev/foo/bar/bar.proto"
+			// from the input file list. This will result in a
+			// 'duplicate symbol SYMBOL: already defined as field in "/home/alice/dev/foo/bar/bar.proto'
+			// error being returned from ParseFiles.
+			return nil, errNoImportPathsForAbsoluteFilePath
+		}
+		return fileNames, nil
+	}
+	absImportPaths, err := absoluteFilePaths(importPaths)
+	if err != nil {
+		return nil, err
+	}
+	resolvedFileNames := make([]string, 0, len(fileNames))
+	for _, fileName := range fileNames {
+		resolvedFileName, err := resolveFilename(absImportPaths, fileName)
+		if err != nil {
+			return nil, err
+		}
+		// On Windows, the resolved paths will use "\", but proto imports
+		// require the use of "/". So fix up here.
+		if filepath.Separator != '/' {
+			resolvedFileName = strings.Replace(resolvedFileName, string(filepath.Separator), "/", -1)
+		}
+		resolvedFileNames = append(resolvedFileNames, resolvedFileName)
+	}
+	return resolvedFileNames, nil
+}
+
+func containsAbsFilePath(filePaths []string) bool {
+	for _, filePath := range filePaths {
+		if filepath.IsAbs(filePath) {
+			return true
+		}
+	}
+	return false
+}
+
+func absoluteFilePaths(filePaths []string) ([]string, error) {
+	absFilePaths := make([]string, 0, len(filePaths))
+	for _, filePath := range filePaths {
+		absFilePath, err := canonicalize(filePath)
+		if err != nil {
+			return nil, err
+		}
+		absFilePaths = append(absFilePaths, absFilePath)
+	}
+	return absFilePaths, nil
+}
+
+func canonicalize(filePath string) (string, error) {
+	absPath, err := filepath.Abs(filePath)
+	if err != nil {
+		return "", err
+	}
+	// this is kind of gross, but it lets us construct a resolved path even if some
+	// path elements do not exist (a single call to filepath.EvalSymlinks would just
+	// return an error, ENOENT, in that case).
+	head := absPath
+	tail := ""
+	for {
+		noLinks, err := filepath.EvalSymlinks(head)
+		if err == nil {
+			if tail != "" {
+				return filepath.Join(noLinks, tail), nil
+			}
+			return noLinks, nil
+		}
+
+		if tail == "" {
+			tail = filepath.Base(head)
+		} else {
+			tail = filepath.Join(filepath.Base(head), tail)
+		}
+		head = filepath.Dir(head)
+		if head == "." {
+			// ran out of path elements to try to resolve
+			return absPath, nil
+		}
+	}
+}
+
+const dotPrefix = "." + string(filepath.Separator)
+const dotDotPrefix = ".."
+ string(filepath.Separator) + +func resolveFilename(absImportPaths []string, fileName string) (string, error) { + if filepath.IsAbs(fileName) { + return resolveAbsFilename(absImportPaths, fileName) + } + + if !strings.HasPrefix(fileName, dotPrefix) && !strings.HasPrefix(fileName, dotDotPrefix) { + // Use of . and .. are assumed to be relative to current working + // directory. So if those aren't present, check to see if the file is + // relative to an import path. + for _, absImportPath := range absImportPaths { + absFileName := filepath.Join(absImportPath, fileName) + _, err := os.Stat(absFileName) + if err != nil { + continue + } + // found it! it was relative to this import path + return fileName, nil + } + } + + // must be relative to current working dir + return resolveAbsFilename(absImportPaths, fileName) +} + +func resolveAbsFilename(absImportPaths []string, fileName string) (string, error) { + absFileName, err := canonicalize(fileName) + if err != nil { + return "", err + } + for _, absImportPath := range absImportPaths { + if isDescendant(absImportPath, absFileName) { + resolvedPath, err := filepath.Rel(absImportPath, absFileName) + if err != nil { + return "", err + } + return resolvedPath, nil + } + } + return "", fmt.Errorf("%s does not reside in any import path", fileName) +} + +// isDescendant returns true if file is a descendant of dir. Both dir and file must +// be cleaned, absolute paths. +func isDescendant(dir, file string) bool { + dir = filepath.Clean(dir) + cur := file + for { + d := filepath.Dir(cur) + if d == dir { + return true + } + if d == "." || d == cur { + // we've run out of path elements + return false + } + cur = d + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt new file mode 100644 index 00000000..ef11ff4a --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt @@ -0,0 +1,6401 @@ +---- desc_test_comments.proto ---- + + +: +desc_test_comments.proto:8:1 +desc_test_comments.proto:156:2 + + + > syntax: +desc_test_comments.proto:8:1 +desc_test_comments.proto:8:19 + Leading detached comment [0]: + This is the first detached comment for the syntax. + + Leading detached comment [1]: + + This is a second detached comment. + + Leading detached comment [2]: + This is a third. + + Leading comments: + Syntax comment... + + Trailing comments: + Syntax trailer. + + + + > package: +desc_test_comments.proto:12:1 +desc_test_comments.proto:12:17 + Leading comments: + And now the package declaration + + + + > options: +desc_test_comments.proto:15:1 +desc_test_comments.proto:15:75 + + + > options > go_package: +desc_test_comments.proto:15:1 +desc_test_comments.proto:15:75 + Leading comments: + option comments FTW!!! + + + + > dependency[0]: +desc_test_comments.proto:17:1 +desc_test_comments.proto:17:45 + + + > public_dependency[0]: +desc_test_comments.proto:17:8 +desc_test_comments.proto:17:14 + + + > dependency[1]: +desc_test_comments.proto:18:1 +desc_test_comments.proto:18:34 + + + > message_type[0]: +desc_test_comments.proto:25:1 +desc_test_comments.proto:113:2 + Leading detached comment [0]: + Multiple white space lines (like above) cannot + be preserved... + + Leading comments: + We need a request for our RPC service below. 
+ + + + > message_type[0] > name: +desc_test_comments.proto:25:68 +desc_test_comments.proto:25:75 + Leading comments: + request with a capital R + Trailing comments: + trailer + + + + > message_type[0] > options: +desc_test_comments.proto:26:9 +desc_test_comments.proto:26:34 + + + > message_type[0] > options > deprecated: +desc_test_comments.proto:26:9 +desc_test_comments.proto:26:34 + Trailing comments: + deprecated! + + + + > message_type[0] > field[0]: +desc_test_comments.proto:29:9 +desc_test_comments.proto:32:132 + Leading comments: + A field comment + + Trailing comments: + field trailer #1... + + + + > message_type[0] > field[0] > label: +desc_test_comments.proto:29:9 +desc_test_comments.proto:29:17 + + + > message_type[0] > field[0] > type: +desc_test_comments.proto:29:18 +desc_test_comments.proto:29:23 + + + > message_type[0] > field[0] > name: +desc_test_comments.proto:29:24 +desc_test_comments.proto:29:27 + + + > message_type[0] > field[0] > number: +desc_test_comments.proto:29:70 +desc_test_comments.proto:29:71 + Leading comments: + tag numero uno + Trailing comments: + tag trailer +that spans multiple lines... +more than two. + + + > message_type[0] > field[0] > options: +desc_test_comments.proto:32:11 +desc_test_comments.proto:32:131 + + + > message_type[0] > field[0] > options > packed: +desc_test_comments.proto:32:12 +desc_test_comments.proto:32:23 + Trailing comments: + packed! + + + > message_type[0] > field[0] > json_name: +desc_test_comments.proto:32:39 +desc_test_comments.proto:32:56 + Trailing comments: + custom JSON! + + + > message_type[0] > field[0] > options > (testprotos.ffubar)[0]: +desc_test_comments.proto:32:77 +desc_test_comments.proto:32:102 + + + > message_type[0] > field[0] > options > (testprotos.ffubarb): +desc_test_comments.proto:32:104 +desc_test_comments.proto:32:130 + + + > message_type[0] > options: +desc_test_comments.proto:35:27 +desc_test_comments.proto:35:61 + + + > message_type[0] > options > (testprotos.mfubar): +desc_test_comments.proto:35:27 +desc_test_comments.proto:35:61 + Leading comments: + lead mfubar + Trailing comments: + trailing mfubar + + + + > message_type[0] > field[1]: +desc_test_comments.proto:42:29 +desc_test_comments.proto:43:77 + Leading detached comment [0]: + some detached comments + + Leading detached comment [1]: + some detached comments with unicode 这个是值 + + Leading detached comment [2]: + Another field comment + + Leading comments: + label comment + + + > message_type[0] > field[1] > label: +desc_test_comments.proto:42:29 +desc_test_comments.proto:42:37 + + + > message_type[0] > field[1] > type: +desc_test_comments.proto:42:57 +desc_test_comments.proto:42:63 + Leading detached comment [0]: + type comment + + + > message_type[0] > field[1] > name: +desc_test_comments.proto:42:83 +desc_test_comments.proto:42:87 + Leading detached comment [0]: + name comment + + + > message_type[0] > field[1] > number: +desc_test_comments.proto:42:90 +desc_test_comments.proto:42:91 + + + > message_type[0] > field[1] > options: +desc_test_comments.proto:43:17 +desc_test_comments.proto:43:76 + + + > message_type[0] > field[1] > default_value: +desc_test_comments.proto:43:37 +desc_test_comments.proto:43:54 + Leading detached comment [0]: + default lead + Trailing comments: + default trail + + + > message_type[0] > extension_range: +desc_test_comments.proto:46:9 +desc_test_comments.proto:46:31 + Leading comments: + extension range comments are (sadly) not preserved + + + + > message_type[0] > extension_range[0]: 
+desc_test_comments.proto:46:20 +desc_test_comments.proto:46:30 + + + > message_type[0] > extension_range[0] > start: +desc_test_comments.proto:46:20 +desc_test_comments.proto:46:23 + + + > message_type[0] > extension_range[0] > end: +desc_test_comments.proto:46:27 +desc_test_comments.proto:46:30 + + + > message_type[0] > extension_range: +desc_test_comments.proto:47:9 +desc_test_comments.proto:47:109 + + + > message_type[0] > extension_range[1]: +desc_test_comments.proto:47:20 +desc_test_comments.proto:47:30 + + + > message_type[0] > extension_range[1] > start: +desc_test_comments.proto:47:20 +desc_test_comments.proto:47:23 + + + > message_type[0] > extension_range[1] > end: +desc_test_comments.proto:47:27 +desc_test_comments.proto:47:30 + + + > message_type[0] > extension_range[1] > options: +desc_test_comments.proto:47:31 +desc_test_comments.proto:47:108 + + + > message_type[0] > extension_range[1] > options > (testprotos.exfubarb): +desc_test_comments.proto:47:32 +desc_test_comments.proto:47:74 + + + > message_type[0] > extension_range[1] > options > (testprotos.exfubar)[0]: +desc_test_comments.proto:47:76 +desc_test_comments.proto:47:107 + + + > message_type[0] > reserved_range: +desc_test_comments.proto:51:48 +desc_test_comments.proto:51:77 + Leading detached comment [0]: + another detached comment + + Leading comments: + same for reserved range comments + + + > message_type[0] > reserved_range[0]: +desc_test_comments.proto:51:57 +desc_test_comments.proto:51:65 + + + > message_type[0] > reserved_range[0] > start: +desc_test_comments.proto:51:57 +desc_test_comments.proto:51:59 + + + > message_type[0] > reserved_range[0] > end: +desc_test_comments.proto:51:63 +desc_test_comments.proto:51:65 + + + > message_type[0] > reserved_range[1]: +desc_test_comments.proto:51:67 +desc_test_comments.proto:51:75 + + + > message_type[0] > reserved_range[1] > start: +desc_test_comments.proto:51:67 +desc_test_comments.proto:51:69 + + + > message_type[0] > reserved_range[1] > end: +desc_test_comments.proto:51:73 +desc_test_comments.proto:51:75 + + + > message_type[0] > reserved_name: +desc_test_comments.proto:52:9 +desc_test_comments.proto:52:38 + Trailing comments: + reserved trailers + + + > message_type[0] > reserved_name[0]: +desc_test_comments.proto:52:18 +desc_test_comments.proto:52:23 + + + > message_type[0] > reserved_name[1]: +desc_test_comments.proto:52:25 +desc_test_comments.proto:52:30 + + + > message_type[0] > reserved_name[2]: +desc_test_comments.proto:52:32 +desc_test_comments.proto:52:37 + + + > message_type[0] > field[2]: +desc_test_comments.proto:55:9 +desc_test_comments.proto:69:10 + + + > message_type[0] > field[2] > label: +desc_test_comments.proto:55:9 +desc_test_comments.proto:55:17 + + + > message_type[0] > field[2] > type: +desc_test_comments.proto:55:18 +desc_test_comments.proto:55:23 + + + > message_type[0] > field[2] > name: +desc_test_comments.proto:55:41 +desc_test_comments.proto:55:47 + + + > message_type[0] > field[2] > number: +desc_test_comments.proto:55:50 +desc_test_comments.proto:55:51 + + + > message_type[0] > nested_type[0]: +desc_test_comments.proto:55:9 +desc_test_comments.proto:69:10 + Leading comments: + Group comment with emoji 😀 😍 👻 ❤ 💯 💥 🐶 🦂 🥑 🍻 🌍 🚕 🪐 + + Trailing comments: + trailer for Extras + + + + > message_type[0] > nested_type[0] > name: +desc_test_comments.proto:55:41 +desc_test_comments.proto:55:47 + Leading detached comment [0]: + group name + + + > message_type[0] > field[2] > type_name: +desc_test_comments.proto:55:41 
+desc_test_comments.proto:55:47 + + + > message_type[0] > nested_type[0] > options: +desc_test_comments.proto:59:17 +desc_test_comments.proto:59:52 + + + > message_type[0] > nested_type[0] > options > (testprotos.mfubar): +desc_test_comments.proto:59:17 +desc_test_comments.proto:59:52 + Leading comments: + this is a custom option + + + + > message_type[0] > nested_type[0] > field[0]: +desc_test_comments.proto:61:17 +desc_test_comments.proto:61:41 + + + > message_type[0] > nested_type[0] > field[0] > label: +desc_test_comments.proto:61:17 +desc_test_comments.proto:61:25 + + + > message_type[0] > nested_type[0] > field[0] > type: +desc_test_comments.proto:61:26 +desc_test_comments.proto:61:32 + + + > message_type[0] > nested_type[0] > field[0] > name: +desc_test_comments.proto:61:33 +desc_test_comments.proto:61:36 + + + > message_type[0] > nested_type[0] > field[0] > number: +desc_test_comments.proto:61:39 +desc_test_comments.proto:61:40 + + + > message_type[0] > nested_type[0] > field[1]: +desc_test_comments.proto:62:17 +desc_test_comments.proto:62:40 + + + > message_type[0] > nested_type[0] > field[1] > label: +desc_test_comments.proto:62:17 +desc_test_comments.proto:62:25 + + + > message_type[0] > nested_type[0] > field[1] > type: +desc_test_comments.proto:62:26 +desc_test_comments.proto:62:31 + + + > message_type[0] > nested_type[0] > field[1] > name: +desc_test_comments.proto:62:32 +desc_test_comments.proto:62:35 + + + > message_type[0] > nested_type[0] > field[1] > number: +desc_test_comments.proto:62:38 +desc_test_comments.proto:62:39 + + + > message_type[0] > nested_type[0] > options: +desc_test_comments.proto:64:17 +desc_test_comments.proto:64:64 + + + > message_type[0] > nested_type[0] > options > no_standard_descriptor_accessor: +desc_test_comments.proto:64:17 +desc_test_comments.proto:64:64 + + + > message_type[0] > nested_type[0] > field[2]: +desc_test_comments.proto:67:17 +desc_test_comments.proto:67:41 + Leading comments: + Leading comment... + + Trailing comments: + Trailing comment... + + + + > message_type[0] > nested_type[0] > field[2] > label: +desc_test_comments.proto:67:17 +desc_test_comments.proto:67:25 + + + > message_type[0] > nested_type[0] > field[2] > type: +desc_test_comments.proto:67:26 +desc_test_comments.proto:67:32 + + + > message_type[0] > nested_type[0] > field[2] > name: +desc_test_comments.proto:67:33 +desc_test_comments.proto:67:36 + + + > message_type[0] > nested_type[0] > field[2] > number: +desc_test_comments.proto:67:39 +desc_test_comments.proto:67:40 + + + > message_type[0] > enum_type[0]: +desc_test_comments.proto:71:9 +desc_test_comments.proto:93:10 + Trailing comments: + trailer for enum + + + + > message_type[0] > enum_type[0] > name: +desc_test_comments.proto:71:14 +desc_test_comments.proto:71:29 + Trailing comments: + "super"! + + + + > message_type[0] > enum_type[0] > options: +desc_test_comments.proto:75:17 +desc_test_comments.proto:75:43 + + + > message_type[0] > enum_type[0] > options > allow_alias: +desc_test_comments.proto:75:17 +desc_test_comments.proto:75:43 + Leading comments: + allow_alias comments! 
+ + + + > message_type[0] > enum_type[0] > value[0]: +desc_test_comments.proto:77:17 +desc_test_comments.proto:77:86 + + + > message_type[0] > enum_type[0] > value[0] > name: +desc_test_comments.proto:77:17 +desc_test_comments.proto:77:22 + + + > message_type[0] > enum_type[0] > value[0] > number: +desc_test_comments.proto:77:25 +desc_test_comments.proto:77:26 + + + > message_type[0] > enum_type[0] > value[0] > options: +desc_test_comments.proto:77:27 +desc_test_comments.proto:77:85 + + + > message_type[0] > enum_type[0] > value[0] > options > (testprotos.evfubars): +desc_test_comments.proto:77:28 +desc_test_comments.proto:77:56 + + + > message_type[0] > enum_type[0] > value[0] > options > (testprotos.evfubar): +desc_test_comments.proto:77:58 +desc_test_comments.proto:77:84 + + + > message_type[0] > enum_type[0] > value[1]: +desc_test_comments.proto:78:17 +desc_test_comments.proto:78:100 + + + > message_type[0] > enum_type[0] > value[1] > name: +desc_test_comments.proto:78:17 +desc_test_comments.proto:78:22 + + + > message_type[0] > enum_type[0] > value[1] > number: +desc_test_comments.proto:78:25 +desc_test_comments.proto:78:26 + + + > message_type[0] > enum_type[0] > value[1] > options: +desc_test_comments.proto:78:27 +desc_test_comments.proto:78:99 + + + > message_type[0] > enum_type[0] > value[1] > options > (testprotos.evfubaruf): +desc_test_comments.proto:78:29 +desc_test_comments.proto:78:57 + + + > message_type[0] > enum_type[0] > value[1] > options > (testprotos.evfubaru): +desc_test_comments.proto:78:73 +desc_test_comments.proto:78:98 + Leading detached comment [0]: + swoosh! + + + > message_type[0] > enum_type[0] > value[2]: +desc_test_comments.proto:79:17 +desc_test_comments.proto:79:27 + + + > message_type[0] > enum_type[0] > value[2] > name: +desc_test_comments.proto:79:17 +desc_test_comments.proto:79:22 + + + > message_type[0] > enum_type[0] > value[2] > number: +desc_test_comments.proto:79:25 +desc_test_comments.proto:79:26 + + + > message_type[0] > enum_type[0] > value[3]: +desc_test_comments.proto:80:17 +desc_test_comments.proto:80:28 + + + > message_type[0] > enum_type[0] > value[3] > name: +desc_test_comments.proto:80:17 +desc_test_comments.proto:80:23 + + + > message_type[0] > enum_type[0] > value[3] > number: +desc_test_comments.proto:80:26 +desc_test_comments.proto:80:27 + + + > message_type[0] > enum_type[0] > options: +desc_test_comments.proto:82:17 +desc_test_comments.proto:82:52 + + + > message_type[0] > enum_type[0] > options > (testprotos.efubars): +desc_test_comments.proto:82:17 +desc_test_comments.proto:82:52 + + + > message_type[0] > enum_type[0] > value[4]: +desc_test_comments.proto:84:17 +desc_test_comments.proto:84:27 + + + > message_type[0] > enum_type[0] > value[4] > name: +desc_test_comments.proto:84:17 +desc_test_comments.proto:84:22 + + + > message_type[0] > enum_type[0] > value[4] > number: +desc_test_comments.proto:84:25 +desc_test_comments.proto:84:26 + + + > message_type[0] > enum_type[0] > value[5]: +desc_test_comments.proto:85:17 +desc_test_comments.proto:85:29 + + + > message_type[0] > enum_type[0] > value[5] > name: +desc_test_comments.proto:85:17 +desc_test_comments.proto:85:24 + + + > message_type[0] > enum_type[0] > value[5] > number: +desc_test_comments.proto:85:27 +desc_test_comments.proto:85:28 + + + > message_type[0] > enum_type[0] > value[6]: +desc_test_comments.proto:86:17 +desc_test_comments.proto:86:60 + + + > message_type[0] > enum_type[0] > value[6] > name: +desc_test_comments.proto:86:17 +desc_test_comments.proto:86:24 + + + > 
message_type[0] > enum_type[0] > value[6] > number: +desc_test_comments.proto:86:27 +desc_test_comments.proto:86:28 + + + > message_type[0] > enum_type[0] > value[6] > options: +desc_test_comments.proto:86:29 +desc_test_comments.proto:86:59 + + + > message_type[0] > enum_type[0] > value[6] > options > (testprotos.evfubarsf): +desc_test_comments.proto:86:30 +desc_test_comments.proto:86:58 + + + > message_type[0] > enum_type[0] > value[7]: +desc_test_comments.proto:87:17 +desc_test_comments.proto:87:28 + + + > message_type[0] > enum_type[0] > value[7] > name: +desc_test_comments.proto:87:17 +desc_test_comments.proto:87:23 + + + > message_type[0] > enum_type[0] > value[7] > number: +desc_test_comments.proto:87:26 +desc_test_comments.proto:87:27 + + + > message_type[0] > enum_type[0] > value[8]: +desc_test_comments.proto:88:17 +desc_test_comments.proto:88:31 + + + > message_type[0] > enum_type[0] > value[8] > name: +desc_test_comments.proto:88:17 +desc_test_comments.proto:88:26 + + + > message_type[0] > enum_type[0] > value[8] > number: +desc_test_comments.proto:88:29 +desc_test_comments.proto:88:30 + + + > message_type[0] > enum_type[0] > value[9]: +desc_test_comments.proto:89:17 +desc_test_comments.proto:89:27 + + + > message_type[0] > enum_type[0] > value[9] > name: +desc_test_comments.proto:89:17 +desc_test_comments.proto:89:22 + + + > message_type[0] > enum_type[0] > value[9] > number: +desc_test_comments.proto:89:25 +desc_test_comments.proto:89:26 + + + > message_type[0] > enum_type[0] > value[10]: +desc_test_comments.proto:90:17 +desc_test_comments.proto:90:31 + + + > message_type[0] > enum_type[0] > value[10] > name: +desc_test_comments.proto:90:17 +desc_test_comments.proto:90:23 + + + > message_type[0] > enum_type[0] > value[10] > number: +desc_test_comments.proto:90:26 +desc_test_comments.proto:90:30 + + + > message_type[0] > enum_type[0] > options: +desc_test_comments.proto:92:17 +desc_test_comments.proto:92:50 + + + > message_type[0] > enum_type[0] > options > (testprotos.efubar): +desc_test_comments.proto:92:17 +desc_test_comments.proto:92:50 + + + > message_type[0] > oneof_decl[0]: +desc_test_comments.proto:96:9 +desc_test_comments.proto:101:10 + Leading comments: + can be this or that + + Trailing comments: + trailer for oneof abc + + + + > message_type[0] > oneof_decl[0] > name: +desc_test_comments.proto:96:15 +desc_test_comments.proto:96:18 + + + > message_type[0] > field[3]: +desc_test_comments.proto:99:17 +desc_test_comments.proto:99:33 + + + > message_type[0] > field[3] > type: +desc_test_comments.proto:99:17 +desc_test_comments.proto:99:23 + + + > message_type[0] > field[3] > name: +desc_test_comments.proto:99:24 +desc_test_comments.proto:99:28 + + + > message_type[0] > field[3] > number: +desc_test_comments.proto:99:31 +desc_test_comments.proto:99:32 + + + > message_type[0] > field[4]: +desc_test_comments.proto:100:17 +desc_test_comments.proto:100:32 + + + > message_type[0] > field[4] > type: +desc_test_comments.proto:100:17 +desc_test_comments.proto:100:22 + + + > message_type[0] > field[4] > name: +desc_test_comments.proto:100:23 +desc_test_comments.proto:100:27 + + + > message_type[0] > field[4] > number: +desc_test_comments.proto:100:30 +desc_test_comments.proto:100:31 + + + > message_type[0] > oneof_decl[1]: +desc_test_comments.proto:103:9 +desc_test_comments.proto:109:10 + Leading comments: + can be these or those + + + + > message_type[0] > oneof_decl[1] > name: +desc_test_comments.proto:103:15 +desc_test_comments.proto:103:18 + + + > message_type[0] > 
oneof_decl[1] > options: +desc_test_comments.proto:105:17 +desc_test_comments.proto:105:89 + + + > message_type[0] > oneof_decl[1] > options > (testprotos.oofubar)[0]: +desc_test_comments.proto:105:17 +desc_test_comments.proto:105:89 + Leading comments: + whoops? + + + + > message_type[0] > field[5]: +desc_test_comments.proto:107:17 +desc_test_comments.proto:107:34 + + + > message_type[0] > field[5] > type: +desc_test_comments.proto:107:17 +desc_test_comments.proto:107:23 + + + > message_type[0] > field[5] > name: +desc_test_comments.proto:107:24 +desc_test_comments.proto:107:29 + + + > message_type[0] > field[5] > number: +desc_test_comments.proto:107:32 +desc_test_comments.proto:107:33 + + + > message_type[0] > field[6]: +desc_test_comments.proto:108:17 +desc_test_comments.proto:108:33 + + + > message_type[0] > field[6] > type: +desc_test_comments.proto:108:17 +desc_test_comments.proto:108:22 + + + > message_type[0] > field[6] > name: +desc_test_comments.proto:108:23 +desc_test_comments.proto:108:28 + + + > message_type[0] > field[6] > number: +desc_test_comments.proto:108:31 +desc_test_comments.proto:108:32 + + + > message_type[0] > field[7]: +desc_test_comments.proto:112:9 +desc_test_comments.proto:112:40 + Leading comments: + map field + + + + > message_type[0] > field[7] > type_name: +desc_test_comments.proto:112:9 +desc_test_comments.proto:112:28 + + + > message_type[0] > field[7] > name: +desc_test_comments.proto:112:29 +desc_test_comments.proto:112:35 + + + > message_type[0] > field[7] > number: +desc_test_comments.proto:112:38 +desc_test_comments.proto:112:39 + + + > extension: +desc_test_comments.proto:117:1 +desc_test_comments.proto:128:2 + Leading detached comment [0]: + And next we'll need some extensions... + + Trailing comments: + trailer for extend block + + + + > extension[0]: +desc_test_comments.proto:125:9 +desc_test_comments.proto:125:37 + Leading comments: + comment for guid1 + + + + > extension[0] > extendee: +desc_test_comments.proto:119:1 +desc_test_comments.proto:119:8 + Leading comments: + extendee comment + + + + > extension[0] > label: +desc_test_comments.proto:125:9 +desc_test_comments.proto:125:17 + + + > extension[0] > type: +desc_test_comments.proto:125:18 +desc_test_comments.proto:125:24 + + + > extension[0] > name: +desc_test_comments.proto:125:25 +desc_test_comments.proto:125:30 + + + > extension[0] > number: +desc_test_comments.proto:125:33 +desc_test_comments.proto:125:36 + + + > extension[1]: +desc_test_comments.proto:127:9 +desc_test_comments.proto:127:37 + Leading comments: + ... 
and a comment for guid2 + + + + > extension[1] > extendee: +desc_test_comments.proto:119:1 +desc_test_comments.proto:119:8 + + + > extension[1] > label: +desc_test_comments.proto:127:9 +desc_test_comments.proto:127:17 + + + > extension[1] > type: +desc_test_comments.proto:127:18 +desc_test_comments.proto:127:24 + + + > extension[1] > name: +desc_test_comments.proto:127:25 +desc_test_comments.proto:127:30 + + + > extension[1] > number: +desc_test_comments.proto:127:33 +desc_test_comments.proto:127:36 + + + > message_type[1]: +desc_test_comments.proto:131:1 +desc_test_comments.proto:131:115 + Trailing comments: + trailer for AnEmptyMessage + + + > message_type[1] > name: +desc_test_comments.proto:131:36 +desc_test_comments.proto:131:50 + Leading detached comment [0]: + name leading comment + + + > service[0]: +desc_test_comments.proto:134:1 +desc_test_comments.proto:156:2 + Leading comments: + Service comment + + Trailing comments: + service trailer + that spans multiple lines + + + + > service[0] > name: +desc_test_comments.proto:134:28 +desc_test_comments.proto:134:38 + Leading detached comment [0]: + service name + + + > service[0] > options: +desc_test_comments.proto:139:9 +desc_test_comments.proto:139:43 + + + > service[0] > options > (testprotos.sfubar) > id: +desc_test_comments.proto:139:9 +desc_test_comments.proto:139:43 + Leading comments: + option that sets field + + + + > service[0] > options: +desc_test_comments.proto:141:9 +desc_test_comments.proto:141:47 + + + > service[0] > options > (testprotos.sfubar) > name: +desc_test_comments.proto:141:9 +desc_test_comments.proto:141:47 + Leading comments: + another option that sets field + + + + > service[0] > options: +desc_test_comments.proto:142:9 +desc_test_comments.proto:142:35 + + + > service[0] > options > deprecated: +desc_test_comments.proto:142:9 +desc_test_comments.proto:142:35 + Trailing comments: + DEPRECATED! 
[flattened source-info golden output elided: per-element source locations (file:line:column start/end spans, with leading/trailing/detached comments) for desc_test_comments.proto — service[0] options ((testprotos.sfubare)) and method[0]/method[1] (names, client_streaming, input/output types, and options including deprecated, (testprotos.mtfubar), and (testprotos.mtfubard))]
---- desc_test_complex.proto ----
[flattened source-info golden output elided: per-element source locations for syntax, package, options (go_package), dependency[0], message_type[0] through message_type[8] (fields with labels/types/names/numbers/json_name/default_value, nested types, nested enums, oneof_decl[0], extension ranges with (foo.bar.label) options, reserved ranges and reserved names), enum_type[0] (values, reserved ranges and names), top-level extension blocks extension[0] through extension[12], field options such as (foo.bar.rept), (foo.bar.eee), (foo.bar.a), (foo.bar.map_vals), and (foo.bar.rules), and service[0] method[0]/method[1] with (foo.bar.validator) options; the dump is cut off mid-record at extension[13] > extendee]
+desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[13] > label: +desc_test_complex.proto:241:9 +desc_test_complex.proto:241:17 + + + > extension[13] > type: +desc_test_complex.proto:241:18 +desc_test_complex.proto:241:23 + + + > extension[13] > name: +desc_test_complex.proto:241:24 +desc_test_complex.proto:241:29 + + + > extension[13] > number: +desc_test_complex.proto:241:32 +desc_test_complex.proto:241:37 + + + > extension[14]: +desc_test_complex.proto:242:9 +desc_test_complex.proto:242:38 + + + > extension[14] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[14] > label: +desc_test_complex.proto:242:9 +desc_test_complex.proto:242:17 + + + > extension[14] > type: +desc_test_complex.proto:242:18 +desc_test_complex.proto:242:23 + + + > extension[14] > name: +desc_test_complex.proto:242:24 +desc_test_complex.proto:242:29 + + + > extension[14] > number: +desc_test_complex.proto:242:32 +desc_test_complex.proto:242:37 + + + > extension[15]: +desc_test_complex.proto:243:9 +desc_test_complex.proto:243:38 + + + > extension[15] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[15] > label: +desc_test_complex.proto:243:9 +desc_test_complex.proto:243:17 + + + > extension[15] > type: +desc_test_complex.proto:243:18 +desc_test_complex.proto:243:23 + + + > extension[15] > name: +desc_test_complex.proto:243:24 +desc_test_complex.proto:243:29 + + + > extension[15] > number: +desc_test_complex.proto:243:32 +desc_test_complex.proto:243:37 + + + > extension[16]: +desc_test_complex.proto:244:9 +desc_test_complex.proto:244:40 + + + > extension[16] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[16] > label: +desc_test_complex.proto:244:9 +desc_test_complex.proto:244:17 + + + > extension[16] > type: +desc_test_complex.proto:244:18 +desc_test_complex.proto:244:24 + + + > extension[16] > name: +desc_test_complex.proto:244:25 +desc_test_complex.proto:244:31 + + + > extension[16] > number: +desc_test_complex.proto:244:34 +desc_test_complex.proto:244:39 + + + > extension[17]: +desc_test_complex.proto:245:9 +desc_test_complex.proto:245:40 + + + > extension[17] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[17] > label: +desc_test_complex.proto:245:9 +desc_test_complex.proto:245:17 + + + > extension[17] > type: +desc_test_complex.proto:245:18 +desc_test_complex.proto:245:24 + + + > extension[17] > name: +desc_test_complex.proto:245:25 +desc_test_complex.proto:245:31 + + + > extension[17] > number: +desc_test_complex.proto:245:34 +desc_test_complex.proto:245:39 + + + > extension[18]: +desc_test_complex.proto:246:9 +desc_test_complex.proto:246:40 + + + > extension[18] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[18] > label: +desc_test_complex.proto:246:9 +desc_test_complex.proto:246:17 + + + > extension[18] > type: +desc_test_complex.proto:246:18 +desc_test_complex.proto:246:24 + + + > extension[18] > name: +desc_test_complex.proto:246:25 +desc_test_complex.proto:246:31 + + + > extension[18] > number: +desc_test_complex.proto:246:34 +desc_test_complex.proto:246:39 + + + > extension[19]: +desc_test_complex.proto:247:9 +desc_test_complex.proto:247:40 + + + > extension[19] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[19] > label: +desc_test_complex.proto:247:9 +desc_test_complex.proto:247:17 + + + > extension[19] > type: 
+desc_test_complex.proto:247:18 +desc_test_complex.proto:247:24 + + + > extension[19] > name: +desc_test_complex.proto:247:25 +desc_test_complex.proto:247:31 + + + > extension[19] > number: +desc_test_complex.proto:247:34 +desc_test_complex.proto:247:39 + + + > extension[20]: +desc_test_complex.proto:248:9 +desc_test_complex.proto:248:42 + + + > extension[20] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[20] > label: +desc_test_complex.proto:248:9 +desc_test_complex.proto:248:17 + + + > extension[20] > type: +desc_test_complex.proto:248:18 +desc_test_complex.proto:248:25 + + + > extension[20] > name: +desc_test_complex.proto:248:26 +desc_test_complex.proto:248:33 + + + > extension[20] > number: +desc_test_complex.proto:248:36 +desc_test_complex.proto:248:41 + + + > extension[21]: +desc_test_complex.proto:249:9 +desc_test_complex.proto:249:42 + + + > extension[21] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[21] > label: +desc_test_complex.proto:249:9 +desc_test_complex.proto:249:17 + + + > extension[21] > type: +desc_test_complex.proto:249:18 +desc_test_complex.proto:249:25 + + + > extension[21] > name: +desc_test_complex.proto:249:26 +desc_test_complex.proto:249:33 + + + > extension[21] > number: +desc_test_complex.proto:249:36 +desc_test_complex.proto:249:41 + + + > extension[22]: +desc_test_complex.proto:250:9 +desc_test_complex.proto:250:44 + + + > extension[22] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[22] > label: +desc_test_complex.proto:250:9 +desc_test_complex.proto:250:17 + + + > extension[22] > type: +desc_test_complex.proto:250:18 +desc_test_complex.proto:250:26 + + + > extension[22] > name: +desc_test_complex.proto:250:27 +desc_test_complex.proto:250:35 + + + > extension[22] > number: +desc_test_complex.proto:250:38 +desc_test_complex.proto:250:43 + + + > extension[23]: +desc_test_complex.proto:251:9 +desc_test_complex.proto:251:44 + + + > extension[23] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[23] > label: +desc_test_complex.proto:251:9 +desc_test_complex.proto:251:17 + + + > extension[23] > type: +desc_test_complex.proto:251:18 +desc_test_complex.proto:251:26 + + + > extension[23] > name: +desc_test_complex.proto:251:27 +desc_test_complex.proto:251:35 + + + > extension[23] > number: +desc_test_complex.proto:251:38 +desc_test_complex.proto:251:43 + + + > extension[24]: +desc_test_complex.proto:252:9 +desc_test_complex.proto:252:36 + + + > extension[24] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[24] > label: +desc_test_complex.proto:252:9 +desc_test_complex.proto:252:17 + + + > extension[24] > type: +desc_test_complex.proto:252:18 +desc_test_complex.proto:252:22 + + + > extension[24] > name: +desc_test_complex.proto:252:23 +desc_test_complex.proto:252:27 + + + > extension[24] > number: +desc_test_complex.proto:252:30 +desc_test_complex.proto:252:35 + + + > extension[25]: +desc_test_complex.proto:253:9 +desc_test_complex.proto:253:38 + + + > extension[25] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[25] > label: +desc_test_complex.proto:253:9 +desc_test_complex.proto:253:17 + + + > extension[25] > type: +desc_test_complex.proto:253:18 +desc_test_complex.proto:253:23 + + + > extension[25] > name: +desc_test_complex.proto:253:24 +desc_test_complex.proto:253:29 + + + > extension[25] > 
number: +desc_test_complex.proto:253:32 +desc_test_complex.proto:253:37 + + + > extension[26]: +desc_test_complex.proto:254:9 +desc_test_complex.proto:254:40 + + + > extension[26] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[26] > label: +desc_test_complex.proto:254:9 +desc_test_complex.proto:254:17 + + + > extension[26] > type: +desc_test_complex.proto:254:18 +desc_test_complex.proto:254:24 + + + > extension[26] > name: +desc_test_complex.proto:254:25 +desc_test_complex.proto:254:31 + + + > extension[26] > number: +desc_test_complex.proto:254:34 +desc_test_complex.proto:254:39 + + + > extension[27]: +desc_test_complex.proto:255:9 +desc_test_complex.proto:255:40 + + + > extension[27] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[27] > label: +desc_test_complex.proto:255:9 +desc_test_complex.proto:255:17 + + + > extension[27] > type: +desc_test_complex.proto:255:18 +desc_test_complex.proto:255:22 + + + > extension[27] > name: +desc_test_complex.proto:255:23 +desc_test_complex.proto:255:31 + + + > extension[27] > number: +desc_test_complex.proto:255:34 +desc_test_complex.proto:255:39 + + + > extension[28]: +desc_test_complex.proto:256:9 +desc_test_complex.proto:256:40 + + + > extension[28] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[28] > label: +desc_test_complex.proto:256:9 +desc_test_complex.proto:256:17 + + + > extension[28] > type: +desc_test_complex.proto:256:18 +desc_test_complex.proto:256:22 + + + > extension[28] > name: +desc_test_complex.proto:256:23 +desc_test_complex.proto:256:31 + + + > extension[28] > number: +desc_test_complex.proto:256:34 +desc_test_complex.proto:256:39 + + + > extension[29]: +desc_test_complex.proto:257:9 +desc_test_complex.proto:257:40 + + + > extension[29] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[29] > label: +desc_test_complex.proto:257:9 +desc_test_complex.proto:257:17 + + + > extension[29] > type: +desc_test_complex.proto:257:18 +desc_test_complex.proto:257:22 + + + > extension[29] > name: +desc_test_complex.proto:257:23 +desc_test_complex.proto:257:31 + + + > extension[29] > number: +desc_test_complex.proto:257:34 +desc_test_complex.proto:257:39 + + + > extension[30]: +desc_test_complex.proto:258:9 +desc_test_complex.proto:258:39 + + + > extension[30] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[30] > label: +desc_test_complex.proto:258:9 +desc_test_complex.proto:258:17 + + + > extension[30] > type: +desc_test_complex.proto:258:18 +desc_test_complex.proto:258:22 + + + > extension[30] > name: +desc_test_complex.proto:258:23 +desc_test_complex.proto:258:30 + + + > extension[30] > number: +desc_test_complex.proto:258:33 +desc_test_complex.proto:258:38 + + + > extension[31]: +desc_test_complex.proto:259:9 +desc_test_complex.proto:259:36 + + + > extension[31] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[31] > label: +desc_test_complex.proto:259:9 +desc_test_complex.proto:259:17 + + + > extension[31] > type: +desc_test_complex.proto:259:18 +desc_test_complex.proto:259:22 + + + > extension[31] > name: +desc_test_complex.proto:259:23 +desc_test_complex.proto:259:27 + + + > extension[31] > number: +desc_test_complex.proto:259:30 +desc_test_complex.proto:259:35 + + + > extension[32]: +desc_test_complex.proto:260:9 +desc_test_complex.proto:260:39 + + + > extension[32] > 
extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[32] > label: +desc_test_complex.proto:260:9 +desc_test_complex.proto:260:17 + + + > extension[32] > type: +desc_test_complex.proto:260:18 +desc_test_complex.proto:260:22 + + + > extension[32] > name: +desc_test_complex.proto:260:23 +desc_test_complex.proto:260:30 + + + > extension[32] > number: +desc_test_complex.proto:260:33 +desc_test_complex.proto:260:38 + + + > extension[33]: +desc_test_complex.proto:261:9 +desc_test_complex.proto:261:35 + + + > extension[33] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[33] > label: +desc_test_complex.proto:261:9 +desc_test_complex.proto:261:17 + + + > extension[33] > type: +desc_test_complex.proto:261:18 +desc_test_complex.proto:261:22 + + + > extension[33] > name: +desc_test_complex.proto:261:23 +desc_test_complex.proto:261:26 + + + > extension[33] > number: +desc_test_complex.proto:261:29 +desc_test_complex.proto:261:34 + + + > extension[34]: +desc_test_complex.proto:262:9 +desc_test_complex.proto:262:38 + + + > extension[34] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[34] > label: +desc_test_complex.proto:262:9 +desc_test_complex.proto:262:17 + + + > extension[34] > type: +desc_test_complex.proto:262:18 +desc_test_complex.proto:262:22 + + + > extension[34] > name: +desc_test_complex.proto:262:23 +desc_test_complex.proto:262:29 + + + > extension[34] > number: +desc_test_complex.proto:262:32 +desc_test_complex.proto:262:37 + + + > extension[35]: +desc_test_complex.proto:263:9 +desc_test_complex.proto:263:38 + + + > extension[35] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[35] > label: +desc_test_complex.proto:263:9 +desc_test_complex.proto:263:17 + + + > extension[35] > type: +desc_test_complex.proto:263:18 +desc_test_complex.proto:263:22 + + + > extension[35] > name: +desc_test_complex.proto:263:23 +desc_test_complex.proto:263:29 + + + > extension[35] > number: +desc_test_complex.proto:263:32 +desc_test_complex.proto:263:37 + + + > extension[36]: +desc_test_complex.proto:264:9 +desc_test_complex.proto:264:42 + + + > extension[36] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[36] > label: +desc_test_complex.proto:264:9 +desc_test_complex.proto:264:17 + + + > extension[36] > type: +desc_test_complex.proto:264:18 +desc_test_complex.proto:264:22 + + + > extension[36] > name: +desc_test_complex.proto:264:23 +desc_test_complex.proto:264:33 + + + > extension[36] > number: +desc_test_complex.proto:264:36 +desc_test_complex.proto:264:41 + + + > extension[37]: +desc_test_complex.proto:265:9 +desc_test_complex.proto:265:40 + + + > extension[37] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[37] > label: +desc_test_complex.proto:265:9 +desc_test_complex.proto:265:17 + + + > extension[37] > type: +desc_test_complex.proto:265:18 +desc_test_complex.proto:265:22 + + + > extension[37] > name: +desc_test_complex.proto:265:23 +desc_test_complex.proto:265:31 + + + > extension[37] > number: +desc_test_complex.proto:265:34 +desc_test_complex.proto:265:39 + + + > extension[38]: +desc_test_complex.proto:266:9 +desc_test_complex.proto:266:34 + + + > extension[38] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[38] > label: +desc_test_complex.proto:266:9 +desc_test_complex.proto:266:17 + + + > extension[38] 
> type: +desc_test_complex.proto:266:18 +desc_test_complex.proto:266:22 + + + > extension[38] > name: +desc_test_complex.proto:266:23 +desc_test_complex.proto:266:25 + + + > extension[38] > number: +desc_test_complex.proto:266:28 +desc_test_complex.proto:266:33 + + + > extension[39]: +desc_test_complex.proto:267:9 +desc_test_complex.proto:267:37 + + + > extension[39] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[39] > label: +desc_test_complex.proto:267:9 +desc_test_complex.proto:267:17 + + + > extension[39] > type: +desc_test_complex.proto:267:18 +desc_test_complex.proto:267:23 + + + > extension[39] > name: +desc_test_complex.proto:267:24 +desc_test_complex.proto:267:28 + + + > extension[39] > number: +desc_test_complex.proto:267:31 +desc_test_complex.proto:267:36 + + + > extension[40]: +desc_test_complex.proto:268:9 +desc_test_complex.proto:268:38 + + + > extension[40] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[40] > label: +desc_test_complex.proto:268:9 +desc_test_complex.proto:268:17 + + + > extension[40] > type: +desc_test_complex.proto:268:18 +desc_test_complex.proto:268:23 + + + > extension[40] > name: +desc_test_complex.proto:268:24 +desc_test_complex.proto:268:29 + + + > extension[40] > number: +desc_test_complex.proto:268:32 +desc_test_complex.proto:268:37 + + + > extension[41]: +desc_test_complex.proto:269:9 +desc_test_complex.proto:269:40 + + + > extension[41] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[41] > label: +desc_test_complex.proto:269:9 +desc_test_complex.proto:269:17 + + + > extension[41] > type: +desc_test_complex.proto:269:18 +desc_test_complex.proto:269:23 + + + > extension[41] > name: +desc_test_complex.proto:269:24 +desc_test_complex.proto:269:31 + + + > extension[41] > number: +desc_test_complex.proto:269:34 +desc_test_complex.proto:269:39 + + + > extension[42]: +desc_test_complex.proto:270:9 +desc_test_complex.proto:270:49 + + + > extension[42] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[42] > label: +desc_test_complex.proto:270:9 +desc_test_complex.proto:270:17 + + + > extension[42] > type_name: +desc_test_complex.proto:270:18 +desc_test_complex.proto:270:35 + + + > extension[42] > name: +desc_test_complex.proto:270:36 +desc_test_complex.proto:270:40 + + + > extension[42] > number: +desc_test_complex.proto:270:43 +desc_test_complex.proto:270:48 + + + > message_type[9]: +desc_test_complex.proto:273:1 +desc_test_complex.proto:298:2 + + + > message_type[9] > name: +desc_test_complex.proto:273:9 +desc_test_complex.proto:273:32 + + + > message_type[9] > field[0]: +desc_test_complex.proto:274:9 +desc_test_complex.proto:284:11 + + + > message_type[9] > field[0] > label: +desc_test_complex.proto:274:9 +desc_test_complex.proto:274:17 + + + > message_type[9] > field[0] > type: +desc_test_complex.proto:274:18 +desc_test_complex.proto:274:24 + + + > message_type[9] > field[0] > name: +desc_test_complex.proto:274:25 +desc_test_complex.proto:274:27 + + + > message_type[9] > field[0] > number: +desc_test_complex.proto:274:30 +desc_test_complex.proto:274:31 + + + > message_type[9] > field[0] > options: +desc_test_complex.proto:274:32 +desc_test_complex.proto:284:10 + + + > message_type[9] > field[0] > options > (foo.bar.syntax): +desc_test_complex.proto:275:17 +desc_test_complex.proto:275:32 + + + > message_type[9] > field[0] > options > (foo.bar.import): 
+desc_test_complex.proto:275:34 +desc_test_complex.proto:275:49 + + + > message_type[9] > field[0] > options > (foo.bar.public): +desc_test_complex.proto:275:51 +desc_test_complex.proto:275:66 + + + > message_type[9] > field[0] > options > (foo.bar.weak): +desc_test_complex.proto:275:68 +desc_test_complex.proto:275:81 + + + > message_type[9] > field[0] > options > (foo.bar.package): +desc_test_complex.proto:275:83 +desc_test_complex.proto:275:99 + + + > message_type[9] > field[0] > options > (foo.bar.string): +desc_test_complex.proto:276:17 +desc_test_complex.proto:276:36 + + + > message_type[9] > field[0] > options > (foo.bar.bytes): +desc_test_complex.proto:276:38 +desc_test_complex.proto:276:55 + + + > message_type[9] > field[0] > options > (foo.bar.bool): +desc_test_complex.proto:276:57 +desc_test_complex.proto:276:70 + + + > message_type[9] > field[0] > options > (foo.bar.float): +desc_test_complex.proto:277:17 +desc_test_complex.proto:277:31 + + + > message_type[9] > field[0] > options > (foo.bar.double): +desc_test_complex.proto:277:33 +desc_test_complex.proto:277:51 + + + > message_type[9] > field[0] > options > (foo.bar.int32): +desc_test_complex.proto:278:17 +desc_test_complex.proto:278:29 + + + > message_type[9] > field[0] > options > (foo.bar.int64): +desc_test_complex.proto:278:31 +desc_test_complex.proto:278:43 + + + > message_type[9] > field[0] > options > (foo.bar.uint32): +desc_test_complex.proto:278:45 +desc_test_complex.proto:278:60 + + + > message_type[9] > field[0] > options > (foo.bar.uint64): +desc_test_complex.proto:278:62 +desc_test_complex.proto:278:77 + + + > message_type[9] > field[0] > options > (foo.bar.sint32): +desc_test_complex.proto:278:79 +desc_test_complex.proto:278:93 + + + > message_type[9] > field[0] > options > (foo.bar.sint64): +desc_test_complex.proto:278:95 +desc_test_complex.proto:278:109 + + + > message_type[9] > field[0] > options > (foo.bar.fixed32): +desc_test_complex.proto:279:17 +desc_test_complex.proto:279:33 + + + > message_type[9] > field[0] > options > (foo.bar.fixed64): +desc_test_complex.proto:279:35 +desc_test_complex.proto:279:51 + + + > message_type[9] > field[0] > options > (foo.bar.sfixed32): +desc_test_complex.proto:279:53 +desc_test_complex.proto:279:71 + + + > message_type[9] > field[0] > options > (foo.bar.sfixed64): +desc_test_complex.proto:279:73 +desc_test_complex.proto:279:91 + + + > message_type[9] > field[0] > options > (foo.bar.optional): +desc_test_complex.proto:280:17 +desc_test_complex.proto:280:34 + + + > message_type[9] > field[0] > options > (foo.bar.repeated): +desc_test_complex.proto:280:36 +desc_test_complex.proto:280:53 + + + > message_type[9] > field[0] > options > (foo.bar.required): +desc_test_complex.proto:280:55 +desc_test_complex.proto:280:72 + + + > message_type[9] > field[0] > options > (foo.bar.message): +desc_test_complex.proto:281:17 +desc_test_complex.proto:281:33 + + + > message_type[9] > field[0] > options > (foo.bar.enum): +desc_test_complex.proto:281:35 +desc_test_complex.proto:281:48 + + + > message_type[9] > field[0] > options > (foo.bar.service): +desc_test_complex.proto:281:50 +desc_test_complex.proto:281:66 + + + > message_type[9] > field[0] > options > (foo.bar.rpc): +desc_test_complex.proto:281:68 +desc_test_complex.proto:281:80 + + + > message_type[9] > field[0] > options > (foo.bar.option): +desc_test_complex.proto:282:17 +desc_test_complex.proto:282:32 + + + > message_type[9] > field[0] > options > (foo.bar.extend): +desc_test_complex.proto:282:34 +desc_test_complex.proto:282:49 + + 
+ > message_type[9] > field[0] > options > (foo.bar.extensions): +desc_test_complex.proto:282:51 +desc_test_complex.proto:282:70 + + + > message_type[9] > field[0] > options > (foo.bar.reserved): +desc_test_complex.proto:282:72 +desc_test_complex.proto:282:89 + + + > message_type[9] > field[0] > options > (foo.bar.to): +desc_test_complex.proto:283:17 +desc_test_complex.proto:283:28 + + + > message_type[9] > field[0] > options > (foo.bar.true): +desc_test_complex.proto:283:30 +desc_test_complex.proto:283:42 + + + > message_type[9] > field[0] > options > (foo.bar.false): +desc_test_complex.proto:283:44 +desc_test_complex.proto:283:58 + + + > message_type[9] > field[0] > options > (foo.bar.default): +desc_test_complex.proto:283:60 +desc_test_complex.proto:283:75 + + + > message_type[9] > field[1]: +desc_test_complex.proto:285:9 +desc_test_complex.proto:297:11 + + + > message_type[9] > field[1] > label: +desc_test_complex.proto:285:9 +desc_test_complex.proto:285:17 + + + > message_type[9] > field[1] > type: +desc_test_complex.proto:285:18 +desc_test_complex.proto:285:24 + + + > message_type[9] > field[1] > name: +desc_test_complex.proto:285:25 +desc_test_complex.proto:285:29 + + + > message_type[9] > field[1] > number: +desc_test_complex.proto:285:32 +desc_test_complex.proto:285:33 + + + > message_type[9] > field[1] > options: +desc_test_complex.proto:285:34 +desc_test_complex.proto:297:10 + + + > message_type[9] > field[1] > options > (foo.bar.boom): +desc_test_complex.proto:286:17 +desc_test_complex.proto:296:18 +---- desc_test_options.proto ---- + + +: +desc_test_options.proto:1:1 +desc_test_options.proto:63:2 + + + > syntax: +desc_test_options.proto:1:1 +desc_test_options.proto:1:19 + + + > options: +desc_test_options.proto:3:1 +desc_test_options.proto:3:73 + + + > options > go_package: +desc_test_options.proto:3:1 +desc_test_options.proto:3:73 + + + > package: +desc_test_options.proto:5:1 +desc_test_options.proto:5:20 + + + > dependency[0]: +desc_test_options.proto:7:1 +desc_test_options.proto:7:43 + + + > extension: +desc_test_options.proto:9:1 +desc_test_options.proto:11:2 + + + > extension[0]: +desc_test_options.proto:10:9 +desc_test_options.proto:10:38 + + + > extension[0] > extendee: +desc_test_options.proto:9:8 +desc_test_options.proto:9:38 + + + > extension[0] > label: +desc_test_options.proto:10:9 +desc_test_options.proto:10:17 + + + > extension[0] > type: +desc_test_options.proto:10:18 +desc_test_options.proto:10:22 + + + > extension[0] > name: +desc_test_options.proto:10:23 +desc_test_options.proto:10:29 + + + > extension[0] > number: +desc_test_options.proto:10:32 +desc_test_options.proto:10:37 + + + > extension: +desc_test_options.proto:13:1 +desc_test_options.proto:16:2 + + + > extension[1]: +desc_test_options.proto:14:9 +desc_test_options.proto:14:40 + + + > extension[1] > extendee: +desc_test_options.proto:13:8 +desc_test_options.proto:13:36 + + + > extension[1] > label: +desc_test_options.proto:14:9 +desc_test_options.proto:14:17 + + + > extension[1] > type: +desc_test_options.proto:14:18 +desc_test_options.proto:14:24 + + + > extension[1] > name: +desc_test_options.proto:14:25 +desc_test_options.proto:14:31 + + + > extension[1] > number: +desc_test_options.proto:14:34 +desc_test_options.proto:14:39 + + + > extension[2]: +desc_test_options.proto:15:9 +desc_test_options.proto:15:40 + + + > extension[2] > extendee: +desc_test_options.proto:13:8 +desc_test_options.proto:13:36 + + + > extension[2] > label: +desc_test_options.proto:15:9 +desc_test_options.proto:15:17 + + + > 
extension[2] > type: +desc_test_options.proto:15:18 +desc_test_options.proto:15:23 + + + > extension[2] > name: +desc_test_options.proto:15:24 +desc_test_options.proto:15:31 + + + > extension[2] > number: +desc_test_options.proto:15:34 +desc_test_options.proto:15:39 + + + > extension: +desc_test_options.proto:18:1 +desc_test_options.proto:24:2 + + + > extension[3]: +desc_test_options.proto:19:9 +desc_test_options.proto:19:39 + + + > extension[3] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[3] > label: +desc_test_options.proto:19:9 +desc_test_options.proto:19:17 + + + > extension[3] > type: +desc_test_options.proto:19:18 +desc_test_options.proto:19:23 + + + > extension[3] > name: +desc_test_options.proto:19:24 +desc_test_options.proto:19:30 + + + > extension[3] > number: +desc_test_options.proto:19:33 +desc_test_options.proto:19:38 + + + > extension[4]: +desc_test_options.proto:20:9 +desc_test_options.proto:20:41 + + + > extension[4] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[4] > label: +desc_test_options.proto:20:9 +desc_test_options.proto:20:17 + + + > extension[4] > type: +desc_test_options.proto:20:18 +desc_test_options.proto:20:24 + + + > extension[4] > name: +desc_test_options.proto:20:25 +desc_test_options.proto:20:32 + + + > extension[4] > number: +desc_test_options.proto:20:35 +desc_test_options.proto:20:40 + + + > extension[5]: +desc_test_options.proto:21:9 +desc_test_options.proto:21:44 + + + > extension[5] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[5] > label: +desc_test_options.proto:21:9 +desc_test_options.proto:21:17 + + + > extension[5] > type: +desc_test_options.proto:21:18 +desc_test_options.proto:21:26 + + + > extension[5] > name: +desc_test_options.proto:21:27 +desc_test_options.proto:21:35 + + + > extension[5] > number: +desc_test_options.proto:21:38 +desc_test_options.proto:21:43 + + + > extension[6]: +desc_test_options.proto:22:9 +desc_test_options.proto:22:41 + + + > extension[6] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[6] > label: +desc_test_options.proto:22:9 +desc_test_options.proto:22:17 + + + > extension[6] > type: +desc_test_options.proto:22:18 +desc_test_options.proto:22:24 + + + > extension[6] > name: +desc_test_options.proto:22:25 +desc_test_options.proto:22:32 + + + > extension[6] > number: +desc_test_options.proto:22:35 +desc_test_options.proto:22:40 + + + > extension[7]: +desc_test_options.proto:23:9 +desc_test_options.proto:23:43 + + + > extension[7] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[7] > label: +desc_test_options.proto:23:9 +desc_test_options.proto:23:17 + + + > extension[7] > type: +desc_test_options.proto:23:18 +desc_test_options.proto:23:25 + + + > extension[7] > name: +desc_test_options.proto:23:26 +desc_test_options.proto:23:34 + + + > extension[7] > number: +desc_test_options.proto:23:37 +desc_test_options.proto:23:42 + + + > extension: +desc_test_options.proto:26:1 +desc_test_options.proto:32:2 + + + > extension[8]: +desc_test_options.proto:27:9 +desc_test_options.proto:27:40 + + + > extension[8] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[8] > label: +desc_test_options.proto:27:9 +desc_test_options.proto:27:17 + + + > extension[8] > type: +desc_test_options.proto:27:18 +desc_test_options.proto:27:23 + + + > extension[8] > name: 
+desc_test_options.proto:27:24 +desc_test_options.proto:27:31 + + + > extension[8] > number: +desc_test_options.proto:27:34 +desc_test_options.proto:27:39 + + + > extension[9]: +desc_test_options.proto:28:9 +desc_test_options.proto:28:42 + + + > extension[9] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[9] > label: +desc_test_options.proto:28:9 +desc_test_options.proto:28:17 + + + > extension[9] > type: +desc_test_options.proto:28:18 +desc_test_options.proto:28:24 + + + > extension[9] > name: +desc_test_options.proto:28:25 +desc_test_options.proto:28:33 + + + > extension[9] > number: +desc_test_options.proto:28:36 +desc_test_options.proto:28:41 + + + > extension[10]: +desc_test_options.proto:29:9 +desc_test_options.proto:29:45 + + + > extension[10] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[10] > label: +desc_test_options.proto:29:9 +desc_test_options.proto:29:17 + + + > extension[10] > type: +desc_test_options.proto:29:18 +desc_test_options.proto:29:26 + + + > extension[10] > name: +desc_test_options.proto:29:27 +desc_test_options.proto:29:36 + + + > extension[10] > number: +desc_test_options.proto:29:39 +desc_test_options.proto:29:44 + + + > extension[11]: +desc_test_options.proto:30:9 +desc_test_options.proto:30:42 + + + > extension[11] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[11] > label: +desc_test_options.proto:30:9 +desc_test_options.proto:30:17 + + + > extension[11] > type: +desc_test_options.proto:30:18 +desc_test_options.proto:30:24 + + + > extension[11] > name: +desc_test_options.proto:30:25 +desc_test_options.proto:30:33 + + + > extension[11] > number: +desc_test_options.proto:30:36 +desc_test_options.proto:30:41 + + + > extension[12]: +desc_test_options.proto:31:9 +desc_test_options.proto:31:44 + + + > extension[12] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[12] > label: +desc_test_options.proto:31:9 +desc_test_options.proto:31:17 + + + > extension[12] > type: +desc_test_options.proto:31:18 +desc_test_options.proto:31:25 + + + > extension[12] > name: +desc_test_options.proto:31:26 +desc_test_options.proto:31:35 + + + > extension[12] > number: +desc_test_options.proto:31:38 +desc_test_options.proto:31:43 + + + > extension: +desc_test_options.proto:34:1 +desc_test_options.proto:37:2 + + + > extension[13]: +desc_test_options.proto:35:9 +desc_test_options.proto:35:53 + + + > extension[13] > extendee: +desc_test_options.proto:34:8 +desc_test_options.proto:34:38 + + + > extension[13] > label: +desc_test_options.proto:35:9 +desc_test_options.proto:35:17 + + + > extension[13] > type_name: +desc_test_options.proto:35:18 +desc_test_options.proto:35:37 + + + > extension[13] > name: +desc_test_options.proto:35:38 +desc_test_options.proto:35:44 + + + > extension[13] > number: +desc_test_options.proto:35:47 +desc_test_options.proto:35:52 + + + > extension[14]: +desc_test_options.proto:36:9 +desc_test_options.proto:36:51 + + + > extension[14] > extendee: +desc_test_options.proto:34:8 +desc_test_options.proto:34:38 + + + > extension[14] > label: +desc_test_options.proto:36:9 +desc_test_options.proto:36:17 + + + > extension[14] > type_name: +desc_test_options.proto:36:18 +desc_test_options.proto:36:34 + + + > extension[14] > name: +desc_test_options.proto:36:35 +desc_test_options.proto:36:42 + + + > extension[14] > number: +desc_test_options.proto:36:45 +desc_test_options.proto:36:50 + + + > extension: 
+desc_test_options.proto:39:1 +desc_test_options.proto:42:2 + + + > extension[15]: +desc_test_options.proto:40:9 +desc_test_options.proto:40:40 + + + > extension[15] > extendee: +desc_test_options.proto:39:8 +desc_test_options.proto:39:37 + + + > extension[15] > label: +desc_test_options.proto:40:9 +desc_test_options.proto:40:17 + + + > extension[15] > type: +desc_test_options.proto:40:18 +desc_test_options.proto:40:23 + + + > extension[15] > name: +desc_test_options.proto:40:24 +desc_test_options.proto:40:31 + + + > extension[15] > number: +desc_test_options.proto:40:34 +desc_test_options.proto:40:39 + + + > extension[16]: +desc_test_options.proto:41:9 +desc_test_options.proto:41:42 + + + > extension[16] > extendee: +desc_test_options.proto:39:8 +desc_test_options.proto:39:37 + + + > extension[16] > label: +desc_test_options.proto:41:9 +desc_test_options.proto:41:17 + + + > extension[16] > type: +desc_test_options.proto:41:18 +desc_test_options.proto:41:24 + + + > extension[16] > name: +desc_test_options.proto:41:25 +desc_test_options.proto:41:33 + + + > extension[16] > number: +desc_test_options.proto:41:36 +desc_test_options.proto:41:41 + + + > message_type[0]: +desc_test_options.proto:45:1 +desc_test_options.proto:48:2 + Leading comments: + Test message used by custom options + + + + > message_type[0] > name: +desc_test_options.proto:45:9 +desc_test_options.proto:45:28 + + + > message_type[0] > field[0]: +desc_test_options.proto:46:9 +desc_test_options.proto:46:32 + + + > message_type[0] > field[0] > label: +desc_test_options.proto:46:9 +desc_test_options.proto:46:17 + + + > message_type[0] > field[0] > type: +desc_test_options.proto:46:18 +desc_test_options.proto:46:24 + + + > message_type[0] > field[0] > name: +desc_test_options.proto:46:25 +desc_test_options.proto:46:27 + + + > message_type[0] > field[0] > number: +desc_test_options.proto:46:30 +desc_test_options.proto:46:31 + + + > message_type[0] > field[1]: +desc_test_options.proto:47:9 +desc_test_options.proto:47:34 + + + > message_type[0] > field[1] > label: +desc_test_options.proto:47:9 +desc_test_options.proto:47:17 + + + > message_type[0] > field[1] > type: +desc_test_options.proto:47:18 +desc_test_options.proto:47:24 + + + > message_type[0] > field[1] > name: +desc_test_options.proto:47:25 +desc_test_options.proto:47:29 + + + > message_type[0] > field[1] > number: +desc_test_options.proto:47:32 +desc_test_options.proto:47:33 + + + > enum_type[0]: +desc_test_options.proto:51:1 +desc_test_options.proto:53:2 + Leading comments: + Test enum used by custom options + + + + > enum_type[0] > name: +desc_test_options.proto:51:6 +desc_test_options.proto:51:22 + + + > enum_type[0] > value[0]: +desc_test_options.proto:52:9 +desc_test_options.proto:52:19 + + + > enum_type[0] > value[0] > name: +desc_test_options.proto:52:9 +desc_test_options.proto:52:14 + + + > enum_type[0] > value[0] > number: +desc_test_options.proto:52:17 +desc_test_options.proto:52:18 + + + > extension: +desc_test_options.proto:55:1 +desc_test_options.proto:58:2 + + + > extension[17]: +desc_test_options.proto:56:9 +desc_test_options.proto:56:41 + + + > extension[17] > extendee: +desc_test_options.proto:55:8 +desc_test_options.proto:55:45 + + + > extension[17] > label: +desc_test_options.proto:56:9 +desc_test_options.proto:56:17 + + + > extension[17] > type: +desc_test_options.proto:56:18 +desc_test_options.proto:56:24 + + + > extension[17] > name: +desc_test_options.proto:56:25 +desc_test_options.proto:56:32 + + + > extension[17] > number: 
+desc_test_options.proto:56:35 +desc_test_options.proto:56:40 + + + > extension[18]: +desc_test_options.proto:57:9 +desc_test_options.proto:57:41 + + + > extension[18] > extendee: +desc_test_options.proto:55:8 +desc_test_options.proto:55:45 + + + > extension[18] > label: +desc_test_options.proto:57:9 +desc_test_options.proto:57:17 + + + > extension[18] > type: +desc_test_options.proto:57:18 +desc_test_options.proto:57:23 + + + > extension[18] > name: +desc_test_options.proto:57:24 +desc_test_options.proto:57:32 + + + > extension[18] > number: +desc_test_options.proto:57:35 +desc_test_options.proto:57:40 + + + > extension: +desc_test_options.proto:60:1 +desc_test_options.proto:63:2 + + + > extension[19]: +desc_test_options.proto:61:9 +desc_test_options.proto:61:41 + + + > extension[19] > extendee: +desc_test_options.proto:60:8 +desc_test_options.proto:60:36 + + + > extension[19] > label: +desc_test_options.proto:61:9 +desc_test_options.proto:61:17 + + + > extension[19] > type: +desc_test_options.proto:61:18 +desc_test_options.proto:61:24 + + + > extension[19] > name: +desc_test_options.proto:61:25 +desc_test_options.proto:61:32 + + + > extension[19] > number: +desc_test_options.proto:61:35 +desc_test_options.proto:61:40 + + + > extension[20]: +desc_test_options.proto:62:9 +desc_test_options.proto:62:41 + + + > extension[20] > extendee: +desc_test_options.proto:60:8 +desc_test_options.proto:60:36 + + + > extension[20] > label: +desc_test_options.proto:62:9 +desc_test_options.proto:62:17 + + + > extension[20] > type: +desc_test_options.proto:62:18 +desc_test_options.proto:62:23 + + + > extension[20] > name: +desc_test_options.proto:62:24 +desc_test_options.proto:62:32 + + + > extension[20] > number: +desc_test_options.proto:62:35 +desc_test_options.proto:62:40 diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go new file mode 100644 index 00000000..20d2d7a0 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go @@ -0,0 +1,207 @@ +package sourceinfo + +import ( + "math" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/internal" +) + +// NB: forked from google.golang.org/protobuf/internal/filedesc +type sourceLocations struct { + protoreflect.SourceLocations + + orig []*descriptorpb.SourceCodeInfo_Location + // locs is a list of sourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. + locs []protoreflect.SourceLocation + + // fd is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. 
+ fd protoreflect.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *sourceLocations) Len() int { return len(p.orig) } +func (p *sourceLocations) Get(i int) protoreflect.SourceLocation { + return p.lazyInit().locs[i] +} +func (p *sourceLocations) byKey(k pathKey) protoreflect.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.locs[i] + } + return protoreflect.SourceLocation{} +} +func (p *sourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *sourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { + if p.fd != nil && desc != nil && p.fd != desc.ParentFile() { + return protoreflect.SourceLocation{} // mismatching parent imports + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case protoreflect.FileDescriptor: + // Reverse the path since it was constructed in reverse. + for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case protoreflect.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_messagesTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_nestedMessagesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_extensionsTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_extensionsTag)) + default: + return protoreflect.SourceLocation{} + } + } else { + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_fieldsTag)) + default: + return protoreflect.SourceLocation{} + } + } + case protoreflect.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_oneOfsTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_enumsTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_enumsTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.EnumDescriptor: + path = append(path, int32(internal.Enum_valuesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_servicesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.ServiceDescriptor: + path = append(path, int32(internal.Service_methodsTag)) + default: + return 
protoreflect.SourceLocation{}
+            }
+        default:
+            return protoreflect.SourceLocation{}
+        }
+    }
+}
+func (p *sourceLocations) lazyInit() *sourceLocations {
+    p.once.Do(func() {
+        if len(p.orig) > 0 {
+            p.locs = make([]protoreflect.SourceLocation, len(p.orig))
+            // Collect all the indexes for a given path.
+            pathIdxs := make(map[pathKey][]int, len(p.locs))
+            for i := range p.orig {
+                l := asSourceLocation(p.orig[i])
+                p.locs[i] = l
+                k := newPathKey(l.Path)
+                pathIdxs[k] = append(pathIdxs[k], i)
+            }
+
+            // Update the next index for all locations.
+            p.byPath = make(map[pathKey]int, len(p.locs))
+            for k, idxs := range pathIdxs {
+                for i := 0; i < len(idxs)-1; i++ {
+                    p.locs[idxs[i]].Next = idxs[i+1]
+                }
+                p.locs[idxs[len(idxs)-1]].Next = 0
+                p.byPath[k] = idxs[0] // record the first location for this path
+            }
+        }
+    })
+    return p
+}
+
+func asSourceLocation(l *descriptorpb.SourceCodeInfo_Location) protoreflect.SourceLocation {
+    endLine := l.Span[0]
+    endCol := l.Span[2]
+    if len(l.Span) > 3 {
+        endLine = l.Span[2]
+        endCol = l.Span[3]
+    }
+    return protoreflect.SourceLocation{
+        Path:                    l.Path,
+        StartLine:               int(l.Span[0]),
+        StartColumn:             int(l.Span[1]),
+        EndLine:                 int(endLine),
+        EndColumn:               int(endCol),
+        LeadingDetachedComments: l.LeadingDetachedComments,
+        LeadingComments:         l.GetLeadingComments(),
+        TrailingComments:        l.GetTrailingComments(),
+    }
+}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+    arr [16]uint8 // first n-1 path segments; last element is the length
+    str string    // used if the path does not fit in arr
+}
+
+func newPathKey(p protoreflect.SourcePath) (k pathKey) {
+    if len(p) < len(k.arr) {
+        for i, ps := range p {
+            if ps < 0 || math.MaxUint8 <= ps {
+                return pathKey{str: p.String()}
+            }
+            k.arr[i] = uint8(ps)
+        }
+        k.arr[len(k.arr)-1] = uint8(len(p))
+        return k
+    }
+    return pathKey{str: p.String()}
+}
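+
+// As a worked example of the encoding above (using the standard
+// descriptor.proto field numbers, where message_type = 4 and field = 2):
+// the path {4, 0, 2, 1}, i.e. message_type[0] > field[1], fits in arr as
+// [4 0 2 1 0 ... 4], with the final byte holding the length. Paths of 16
+// or more elements, or with any segment outside [0, 254], fall back to
+// the string form.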
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
new file mode 100644
index 00000000..de38e0d1
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
@@ -0,0 +1,269 @@
+// Package sourceinfo provides the ability to register and query source code info
+// for file descriptors that are compiled into the binary. This data is registered
+// by code generated from the protoc-gen-gosrcinfo plugin.
+//
+// The standard descriptors bundled into the compiled binary are stripped of source
+// code info, to reduce binary size and reduce runtime memory footprint. However,
+// the source code info can be very handy and worth the size cost when used with
+// gRPC services and the server reflection service. Without source code info, the
+// descriptors that a client downloads from the reflection service have no comments.
+// But the presence of comments, and the ability to show them to humans, can greatly
+// improve the utility of user agents that use the reflection service.
+//
+// When the protoc-gen-gosrcinfo plugin is used, the desc.Load* methods, which load
+// descriptors for compiled-in elements, will automatically include source code
+// info, using the data registered with this package.
+//
+// In order to make the reflection service use this functionality, you will need to
+// be using v1.45 or higher of the Go runtime for gRPC (google.golang.org/grpc). The
+// following snippet demonstrates how to do this in your server. Do this instead of
+// using the reflection.Register function:
+//
+//    refSvr := reflection.NewServer(reflection.ServerOptions{
+//        Services:           grpcServer,
+//        DescriptorResolver: sourceinfo.GlobalFiles,
+//        ExtensionResolver:  sourceinfo.GlobalFiles,
+//    })
+//    grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServer, refSvr)
+package sourceinfo
+
+import (
+    "bytes"
+    "compress/gzip"
+    "fmt"
+    "io/ioutil"
+    "sync"
+
+    "github.com/golang/protobuf/proto"
+    "google.golang.org/protobuf/reflect/protodesc"
+    "google.golang.org/protobuf/reflect/protoreflect"
+    "google.golang.org/protobuf/reflect/protoregistry"
+    "google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+    // GlobalFiles is a registry of descriptors that include source code info, if the
+    // files they belong to were processed with protoc-gen-gosrcinfo.
+    //
+    // It is meant to serve as a drop-in alternative to protoregistry.GlobalFiles that
+    // can include source code info in the returned descriptors.
+    GlobalFiles Resolver = registry{}
+
+    // GlobalTypes is a registry of descriptors that include source code info, if the
+    // files they belong to were processed with protoc-gen-gosrcinfo.
+    //
+    // It is meant to serve as a drop-in alternative to protoregistry.GlobalTypes that
+    // can include source code info in the returned descriptors.
+    GlobalTypes TypeResolver = registry{}
+
+    mu               sync.RWMutex
+    sourceInfoByFile = map[string]*descriptorpb.SourceCodeInfo{}
+    fileDescriptors  = map[protoreflect.FileDescriptor]protoreflect.FileDescriptor{}
+)
+
+// Resolver can resolve file names into file descriptors and also provides methods for
+// resolving extensions.
+type Resolver interface {
+    protodesc.Resolver
+    protoregistry.ExtensionTypeResolver
+    RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// NB: These interfaces are far from ideal. Ideally, Resolver would have
+// * EITHER been named FileResolver and not included the extension methods.
+// * OR also included message methods (i.e. embed protoregistry.MessageTypeResolver).
+// Now (since it's been released) we can't add the message methods to the interface as
+// that's not a backwards-compatible change. So we have to introduce the new interface
+// below, which is now a little confusing since it has some overlap with Resolver.
+
+// TypeResolver can resolve message names and URLs into message descriptors and also
+// provides methods for resolving extensions.
+type TypeResolver interface {
+    protoregistry.MessageTypeResolver
+    protoregistry.ExtensionTypeResolver
+    RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// RegisterSourceInfo registers the given source code info for the file descriptor
+// with the given path/name.
+//
+// This is automatically used from older generated code if using a previous release of
+// the protoc-gen-gosrcinfo plugin.
+func RegisterSourceInfo(file string, srcInfo *descriptorpb.SourceCodeInfo) {
+    mu.Lock()
+    defer mu.Unlock()
+    sourceInfoByFile[file] = srcInfo
+}
+
+// RegisterEncodedSourceInfo registers the given source code info, which is a serialized
+// and gzipped form of a google.protobuf.SourceCodeInfo message.
+//
+// This is automatically used from generated code if using the protoc-gen-gosrcinfo
+// plugin.
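+//
+// A minimal sketch of a call site (the file name and the gzipped byte slice
+// are placeholders, not values defined by this package):
+//
+//    func init() {
+//        // srcInfoGz: gzipped wire-format google.protobuf.SourceCodeInfo,
+//        // as emitted by protoc-gen-gosrcinfo for "foo/bar.proto".
+//        if err := sourceinfo.RegisterEncodedSourceInfo("foo/bar.proto", srcInfoGz); err != nil {
+//            panic(err)
+//        }
+//    }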
+func RegisterEncodedSourceInfo(file string, data []byte) error {
+    zipReader, err := gzip.NewReader(bytes.NewReader(data))
+    if err != nil {
+        return err
+    }
+    defer func() {
+        _ = zipReader.Close()
+    }()
+    unzipped, err := ioutil.ReadAll(zipReader)
+    if err != nil {
+        return err
+    }
+    var srcInfo descriptorpb.SourceCodeInfo
+    if err := proto.Unmarshal(unzipped, &srcInfo); err != nil {
+        return err
+    }
+    RegisterSourceInfo(file, &srcInfo)
+    return nil
+}
+
+// SourceInfoForFile queries for any registered source code info for the file
+// descriptor with the given path/name. It returns nil if no source code info
+// was registered.
+func SourceInfoForFile(file string) *descriptorpb.SourceCodeInfo {
+    mu.RLock()
+    defer mu.RUnlock()
+    return sourceInfoByFile[file]
+}
+
+func canWrap(d protoreflect.Descriptor) bool {
+    srcInfo := SourceInfoForFile(d.ParentFile().Path())
+    return len(srcInfo.GetLocation()) > 0
+}
+
+func getFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor {
+    if fd == nil {
+        return nil
+    }
+
+    mu.RLock()
+    result := fileDescriptors[fd]
+    mu.RUnlock()
+
+    if result != nil {
+        return result
+    }
+
+    mu.Lock()
+    defer mu.Unlock()
+    // double-check, in case it was added to map while upgrading lock
+    result = fileDescriptors[fd]
+    if result != nil {
+        return result
+    }
+
+    srcInfo := sourceInfoByFile[fd.Path()]
+    if len(srcInfo.GetLocation()) > 0 {
+        result = &fileDescriptor{
+            FileDescriptor: fd,
+            locs: &sourceLocations{
+                orig: srcInfo.Location,
+            },
+        }
+    } else {
+        // nothing to do; don't bother wrapping
+        result = fd
+    }
+    fileDescriptors[fd] = result
+    return result
+}
+
+type registry struct{}
+
+var _ protodesc.Resolver = &registry{}
+
+func (r registry) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+    fd, err := protoregistry.GlobalFiles.FindFileByPath(path)
+    if err != nil {
+        return nil, err
+    }
+    return getFile(fd), nil
+}
+
+func (r registry) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+    d, err := protoregistry.GlobalFiles.FindDescriptorByName(name)
+    // Check the error before touching d: on failure d is nil, and calling
+    // canWrap on a nil descriptor would panic.
+    if err != nil {
+        return nil, err
+    }
+    if !canWrap(d) {
+        return d, nil
+    }
+    switch d := d.(type) {
+    case protoreflect.FileDescriptor:
+        return getFile(d), nil
+    case protoreflect.MessageDescriptor:
+        return messageDescriptor{d}, nil
+    case protoreflect.ExtensionTypeDescriptor:
+        return extensionDescriptor{d}, nil
+    case protoreflect.FieldDescriptor:
+        return fieldDescriptor{d}, nil
+    case protoreflect.OneofDescriptor:
+        return oneOfDescriptor{d}, nil
+    case protoreflect.EnumDescriptor:
+        return enumDescriptor{d}, nil
+    case protoreflect.EnumValueDescriptor:
+        return enumValueDescriptor{d}, nil
+    case protoreflect.ServiceDescriptor:
+        return serviceDescriptor{d}, nil
+    case protoreflect.MethodDescriptor:
+        return methodDescriptor{d}, nil
+    default:
+        return nil, fmt.Errorf("unrecognized descriptor type: %T", d)
+    }
+}
+
+func (r registry) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+    mt, err := protoregistry.GlobalTypes.FindMessageByName(message)
+    if err != nil {
+        return nil, err
+    }
+    if !canWrap(mt.Descriptor()) {
+        return mt, nil
+    }
+    return messageType{mt}, nil
+}
+
+func (r registry) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+    mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+    if err != nil {
+        return nil, err
+    }
+    if !canWrap(mt.Descriptor()) {
+        return mt, nil
+    }
+    return messageType{mt}, nil
+}
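+
+// An illustrative usage sketch (the message name is a placeholder): resolving
+// a descriptor through GlobalFiles, rather than protoregistry.GlobalFiles,
+// yields wrappers whose source locations carry any registered comments.
+//
+//    d, err := sourceinfo.GlobalFiles.FindDescriptorByName("foo.bar.SomeMessage")
+//    if err == nil {
+//        loc := d.ParentFile().SourceLocations().ByDescriptor(d)
+//        fmt.Println(loc.LeadingComments)
+//    }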
+	xt, err := protoregistry.GlobalTypes.FindExtensionByName(field)
+	if err != nil {
+		return nil, err
+	}
+	if !canWrap(xt.TypeDescriptor()) {
+		return xt, nil
+	}
+	return extensionType{xt}, nil
+}
+
+func (r registry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	xt, err := protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+	if err != nil {
+		return nil, err
+	}
+	if !canWrap(xt.TypeDescriptor()) {
+		return xt, nil
+	}
+	return extensionType{xt}, nil
+}
+
+func (r registry) RangeExtensionsByMessage(message protoreflect.FullName, fn func(protoreflect.ExtensionType) bool) {
+	protoregistry.GlobalTypes.RangeExtensionsByMessage(message, func(xt protoreflect.ExtensionType) bool {
+		if canWrap(xt.TypeDescriptor()) {
+			xt = extensionType{xt}
+		}
+		return fn(xt)
+	})
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go
new file mode 100644
index 00000000..8cbb5bbb
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go
@@ -0,0 +1,629 @@
+package sourceinfo
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// These are wrappers around the various interfaces in the
+// google.golang.org/protobuf/reflect/protoreflect package that all
+// make sure to return a FileDescriptor that includes source
+// code info.
+
+type fileDescriptor struct {
+	protoreflect.FileDescriptor
+	locs protoreflect.SourceLocations
+}
+
+func (f fileDescriptor) ParentFile() protoreflect.FileDescriptor {
+	return f
+}
+
+func (f fileDescriptor) Parent() protoreflect.Descriptor {
+	return nil
+}
+
+func (f fileDescriptor) Imports() protoreflect.FileImports {
+	return imports{f.FileDescriptor.Imports()}
+}
+
+func (f fileDescriptor) Messages() protoreflect.MessageDescriptors {
+	return messages{f.FileDescriptor.Messages()}
+}
+
+func (f fileDescriptor) Enums() protoreflect.EnumDescriptors {
+	return enums{f.FileDescriptor.Enums()}
+}
+
+func (f fileDescriptor) Extensions() protoreflect.ExtensionDescriptors {
+	return extensions{f.FileDescriptor.Extensions()}
+}
+
+func (f fileDescriptor) Services() protoreflect.ServiceDescriptors {
+	return services{f.FileDescriptor.Services()}
+}
+
+func (f fileDescriptor) SourceLocations() protoreflect.SourceLocations {
+	return f.locs
+}
+
+type imports struct {
+	protoreflect.FileImports
+}
+
+func (im imports) Get(i int) protoreflect.FileImport {
+	fi := im.FileImports.Get(i)
+	return protoreflect.FileImport{
+		FileDescriptor: getFile(fi.FileDescriptor),
+		IsPublic:       fi.IsPublic,
+		IsWeak:         fi.IsWeak,
+	}
+}
+
+type messages struct {
+	protoreflect.MessageDescriptors
+}
+
+func (m messages) Get(i int) protoreflect.MessageDescriptor {
+	return messageDescriptor{m.MessageDescriptors.Get(i)}
+}
+
+func (m messages) ByName(n protoreflect.Name) protoreflect.MessageDescriptor {
+	return messageDescriptor{m.MessageDescriptors.ByName(n)}
+}
+
+type enums struct {
+	protoreflect.EnumDescriptors
+}
+
+func (e enums) Get(i int) protoreflect.EnumDescriptor {
+	return enumDescriptor{e.EnumDescriptors.Get(i)}
+}
+
+func (e enums) ByName(n protoreflect.Name) protoreflect.EnumDescriptor {
+	return enumDescriptor{e.EnumDescriptors.ByName(n)}
+}
+
+type extensions struct {
+	protoreflect.ExtensionDescriptors
+}
+
+func (e extensions) Get(i int) protoreflect.ExtensionDescriptor {
+	d := e.ExtensionDescriptors.Get(i)
+ if ed, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extensionDescriptor{ed} + } + return fieldDescriptor{d} +} + +func (e extensions) ByName(n protoreflect.Name) protoreflect.ExtensionDescriptor { + d := e.ExtensionDescriptors.ByName(n) + if ed, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extensionDescriptor{ed} + } + return fieldDescriptor{d} +} + +type services struct { + protoreflect.ServiceDescriptors +} + +func (s services) Get(i int) protoreflect.ServiceDescriptor { + return serviceDescriptor{s.ServiceDescriptors.Get(i)} +} + +func (s services) ByName(n protoreflect.Name) protoreflect.ServiceDescriptor { + return serviceDescriptor{s.ServiceDescriptors.ByName(n)} +} + +type messageDescriptor struct { + protoreflect.MessageDescriptor +} + +func (m messageDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(m.MessageDescriptor.ParentFile()) +} + +func (m messageDescriptor) Parent() protoreflect.Descriptor { + d := m.MessageDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (m messageDescriptor) Fields() protoreflect.FieldDescriptors { + return fields{m.MessageDescriptor.Fields()} +} + +func (m messageDescriptor) Oneofs() protoreflect.OneofDescriptors { + return oneOfs{m.MessageDescriptor.Oneofs()} +} + +func (m messageDescriptor) Enums() protoreflect.EnumDescriptors { + return enums{m.MessageDescriptor.Enums()} +} + +func (m messageDescriptor) Messages() protoreflect.MessageDescriptors { + return messages{m.MessageDescriptor.Messages()} +} + +func (m messageDescriptor) Extensions() protoreflect.ExtensionDescriptors { + return extensions{m.MessageDescriptor.Extensions()} +} + +type fields struct { + protoreflect.FieldDescriptors +} + +func (f fields) Get(i int) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.Get(i)} +} + +func (f fields) ByName(n protoreflect.Name) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByName(n)} +} + +func (f fields) ByJSONName(n string) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByJSONName(n)} +} + +func (f fields) ByTextName(n string) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByTextName(n)} +} + +func (f fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByNumber(n)} +} + +type oneOfs struct { + protoreflect.OneofDescriptors +} + +func (o oneOfs) Get(i int) protoreflect.OneofDescriptor { + return oneOfDescriptor{o.OneofDescriptors.Get(i)} +} + +func (o oneOfs) ByName(n protoreflect.Name) protoreflect.OneofDescriptor { + return oneOfDescriptor{o.OneofDescriptors.ByName(n)} +} + +type fieldDescriptor struct { + protoreflect.FieldDescriptor +} + +func (f fieldDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(f.FieldDescriptor.ParentFile()) +} + +func (f fieldDescriptor) Parent() protoreflect.Descriptor { + d := f.FieldDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (f fieldDescriptor) MapKey() protoreflect.FieldDescriptor { + fd := 
f.FieldDescriptor.MapKey() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (f fieldDescriptor) MapValue() protoreflect.FieldDescriptor { + fd := f.FieldDescriptor.MapValue() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (f fieldDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := f.FieldDescriptor.DefaultEnumValue() + if ed == nil { + return nil + } + return enumValueDescriptor{ed} +} + +func (f fieldDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + od := f.FieldDescriptor.ContainingOneof() + if od == nil { + return nil + } + return oneOfDescriptor{od} +} + +func (f fieldDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + return messageDescriptor{f.FieldDescriptor.ContainingMessage()} +} + +func (f fieldDescriptor) Enum() protoreflect.EnumDescriptor { + ed := f.FieldDescriptor.Enum() + if ed == nil { + return nil + } + return enumDescriptor{ed} +} + +func (f fieldDescriptor) Message() protoreflect.MessageDescriptor { + md := f.FieldDescriptor.Message() + if md == nil { + return nil + } + return messageDescriptor{md} +} + +type oneOfDescriptor struct { + protoreflect.OneofDescriptor +} + +func (o oneOfDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(o.OneofDescriptor.ParentFile()) +} + +func (o oneOfDescriptor) Parent() protoreflect.Descriptor { + d := o.OneofDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (o oneOfDescriptor) Fields() protoreflect.FieldDescriptors { + return fields{o.OneofDescriptor.Fields()} +} + +type enumDescriptor struct { + protoreflect.EnumDescriptor +} + +func (e enumDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.EnumDescriptor.ParentFile()) +} + +func (e enumDescriptor) Parent() protoreflect.Descriptor { + d := e.EnumDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (e enumDescriptor) Values() protoreflect.EnumValueDescriptors { + return enumValues{e.EnumDescriptor.Values()} +} + +type enumValues struct { + protoreflect.EnumValueDescriptors +} + +func (e enumValues) Get(i int) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.Get(i)} +} + +func (e enumValues) ByName(n protoreflect.Name) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.ByName(n)} +} + +func (e enumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.ByNumber(n)} +} + +type enumValueDescriptor struct { + protoreflect.EnumValueDescriptor +} + +func (e enumValueDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.EnumValueDescriptor.ParentFile()) +} + +func (e enumValueDescriptor) Parent() protoreflect.Descriptor { + d := e.EnumValueDescriptor.Parent() + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return enumDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +type extensionDescriptor struct { + protoreflect.ExtensionTypeDescriptor +} + +func (e extensionDescriptor) ParentFile() protoreflect.FileDescriptor { 
+ return getFile(e.ExtensionTypeDescriptor.ParentFile()) +} + +func (e extensionDescriptor) Parent() protoreflect.Descriptor { + d := e.ExtensionTypeDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (e extensionDescriptor) MapKey() protoreflect.FieldDescriptor { + fd := e.ExtensionTypeDescriptor.MapKey() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (e extensionDescriptor) MapValue() protoreflect.FieldDescriptor { + fd := e.ExtensionTypeDescriptor.MapValue() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (e extensionDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := e.ExtensionTypeDescriptor.DefaultEnumValue() + if ed == nil { + return nil + } + return enumValueDescriptor{ed} +} + +func (e extensionDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + od := e.ExtensionTypeDescriptor.ContainingOneof() + if od == nil { + return nil + } + return oneOfDescriptor{od} +} + +func (e extensionDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + return messageDescriptor{e.ExtensionTypeDescriptor.ContainingMessage()} +} + +func (e extensionDescriptor) Enum() protoreflect.EnumDescriptor { + ed := e.ExtensionTypeDescriptor.Enum() + if ed == nil { + return nil + } + return enumDescriptor{ed} +} + +func (e extensionDescriptor) Message() protoreflect.MessageDescriptor { + md := e.ExtensionTypeDescriptor.Message() + if md == nil { + return nil + } + return messageDescriptor{md} +} + +func (e extensionDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return e +} + +var _ protoreflect.ExtensionTypeDescriptor = extensionDescriptor{} + +type serviceDescriptor struct { + protoreflect.ServiceDescriptor +} + +func (s serviceDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(s.ServiceDescriptor.ParentFile()) +} + +func (s serviceDescriptor) Parent() protoreflect.Descriptor { + d := s.ServiceDescriptor.Parent() + switch d := d.(type) { + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (s serviceDescriptor) Methods() protoreflect.MethodDescriptors { + return methods{s.ServiceDescriptor.Methods()} +} + +type methods struct { + protoreflect.MethodDescriptors +} + +func (m methods) Get(i int) protoreflect.MethodDescriptor { + return methodDescriptor{m.MethodDescriptors.Get(i)} +} + +func (m methods) ByName(n protoreflect.Name) protoreflect.MethodDescriptor { + return methodDescriptor{m.MethodDescriptors.ByName(n)} +} + +type methodDescriptor struct { + protoreflect.MethodDescriptor +} + +func (m methodDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(m.MethodDescriptor.ParentFile()) +} + +func (m methodDescriptor) Parent() protoreflect.Descriptor { + d := m.MethodDescriptor.Parent() + switch d := d.(type) { + case protoreflect.ServiceDescriptor: + return serviceDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (m methodDescriptor) Input() protoreflect.MessageDescriptor { + return messageDescriptor{m.MethodDescriptor.Input()} +} + +func (m methodDescriptor) Output() protoreflect.MessageDescriptor { + return messageDescriptor{m.MethodDescriptor.Output()} +} + 
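+// The net effect of these wrappers is that any descriptor reached through a
+// wrapped file reports source locations from the registered source info. A
+// hypothetical sketch ("foo.v1.FooService" is an assumed name, not part of
+// this change):
+//
+//	d, err := GlobalFiles.FindDescriptorByName("foo.v1.FooService")
+//	if err == nil {
+//		loc := d.ParentFile().SourceLocations().ByDescriptor(d)
+//		fmt.Println(loc.LeadingComments) // comments from the .proto source
+//	}
+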
+type extensionType struct { + protoreflect.ExtensionType +} + +func (e extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + return extensionDescriptor{e.ExtensionType.TypeDescriptor()} +} + +type messageType struct { + protoreflect.MessageType +} + +func (m messageType) Descriptor() protoreflect.MessageDescriptor { + return messageDescriptor{m.MessageType.Descriptor()} +} + +// WrapFile wraps the given file descriptor so that it will include source +// code info that was registered with this package if the given file was +// processed with protoc-gen-gosrcinfo. Returns fd without wrapping if fd +// already contains source code info. +func WrapFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor { + if wrapper, ok := fd.(fileDescriptor); ok { + // already wrapped + return wrapper + } + if fd.SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return fd + } + return getFile(fd) +} + +// WrapMessage wraps the given message descriptor so that it will include source +// code info that was registered with this package if the file it is defined in +// was processed with protoc-gen-gosrcinfo. Returns md without wrapping if md's +// parent file already contains source code info. +func WrapMessage(md protoreflect.MessageDescriptor) protoreflect.MessageDescriptor { + if wrapper, ok := md.(messageDescriptor); ok { + // already wrapped + return wrapper + } + if md.ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return md + } + if !canWrap(md) { + return md + } + return messageDescriptor{md} +} + +// WrapEnum wraps the given enum descriptor so that it will include source +// code info that was registered with this package if the file it is defined in +// was processed with protoc-gen-gosrcinfo. Returns ed without wrapping if ed's +// parent file already contains source code info. +func WrapEnum(ed protoreflect.EnumDescriptor) protoreflect.EnumDescriptor { + if wrapper, ok := ed.(enumDescriptor); ok { + // already wrapped + return wrapper + } + if ed.ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return ed + } + if !canWrap(ed) { + return ed + } + return enumDescriptor{ed} +} + +// WrapService wraps the given service descriptor so that it will include source +// code info that was registered with this package if the file it is defined in +// was processed with protoc-gen-gosrcinfo. Returns sd without wrapping if sd's +// parent file already contains source code info. +func WrapService(sd protoreflect.ServiceDescriptor) protoreflect.ServiceDescriptor { + if wrapper, ok := sd.(serviceDescriptor); ok { + // already wrapped + return wrapper + } + if sd.ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return sd + } + if !canWrap(sd) { + return sd + } + return serviceDescriptor{sd} +} + +// WrapExtensionType wraps the given extension type so that its associated +// descriptor will include source code info that was registered with this package +// if the file it is defined in was processed with protoc-gen-gosrcinfo. Returns +// xt without wrapping if the parent file of xt's descriptor already contains +// source code info. 
+func WrapExtensionType(xt protoreflect.ExtensionType) protoreflect.ExtensionType { + if wrapper, ok := xt.(extensionType); ok { + // already wrapped + return wrapper + } + if xt.TypeDescriptor().ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return xt + } + if !canWrap(xt.TypeDescriptor()) { + return xt + } + return extensionType{xt} +} + +// WrapMessageType wraps the given message type so that its associated +// descriptor will include source code info that was registered with this package +// if the file it is defined in was processed with protoc-gen-gosrcinfo. Returns +// mt without wrapping if the parent file of mt's descriptor already contains +// source code info. +func WrapMessageType(mt protoreflect.MessageType) protoreflect.MessageType { + if wrapper, ok := mt.(messageType); ok { + // already wrapped + return wrapper + } + if mt.Descriptor().ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return mt + } + if !canWrap(mt.Descriptor()) { + return mt + } + return messageType{mt} +} diff --git a/vendor/github.com/jhump/protoreflect/desc/wrap.go b/vendor/github.com/jhump/protoreflect/desc/wrap.go new file mode 100644 index 00000000..82610a45 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/wrap.go @@ -0,0 +1,208 @@ +package desc + +import ( + "fmt" + + "github.com/bufbuild/protocompile/protoutil" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// DescriptorWrapper wraps a protoreflect.Descriptor. All of the Descriptor +// implementations in this package implement this interface. This can be +// used to recover the underlying descriptor. Each descriptor type in this +// package also provides a strongly-typed form of this method, such as the +// following method for *FileDescriptor: +// +// UnwrapFile() protoreflect.FileDescriptor +type DescriptorWrapper interface { + Unwrap() protoreflect.Descriptor +} + +// WrapDescriptor wraps the given descriptor, returning a desc.Descriptor +// value that represents the same element. +func WrapDescriptor(d protoreflect.Descriptor) (Descriptor, error) { + return wrapDescriptor(d, noopCache{}) +} + +func wrapDescriptor(d protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) { + switch d := d.(type) { + case protoreflect.FileDescriptor: + return wrapFile(d, cache) + case protoreflect.MessageDescriptor: + return wrapMessage(d, cache) + case protoreflect.FieldDescriptor: + return wrapField(d, cache) + case protoreflect.OneofDescriptor: + return wrapOneOf(d, cache) + case protoreflect.EnumDescriptor: + return wrapEnum(d, cache) + case protoreflect.EnumValueDescriptor: + return wrapEnumValue(d, cache) + case protoreflect.ServiceDescriptor: + return wrapService(d, cache) + case protoreflect.MethodDescriptor: + return wrapMethod(d, cache) + default: + return nil, fmt.Errorf("unknown descriptor type: %T", d) + } +} + +// WrapFiles wraps the given file descriptors, returning a slice of *desc.FileDescriptor +// values that represent the same files. +func WrapFiles(d []protoreflect.FileDescriptor) ([]*FileDescriptor, error) { + cache := mapCache{} + results := make([]*FileDescriptor, len(d)) + for i := range d { + var err error + results[i], err = wrapFile(d[i], cache) + if err != nil { + return nil, err + } + } + return results, nil +} + +// WrapFile wraps the given file descriptor, returning a *desc.FileDescriptor +// value that represents the same file. 
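+//
+// A hypothetical sketch (foopb.Foo is an assumed generated type, not part of
+// this change):
+//
+//	fileDesc := (&foopb.Foo{}).ProtoReflect().Descriptor().ParentFile()
+//	fd, err := desc.WrapFile(fileDesc)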
+func WrapFile(d protoreflect.FileDescriptor) (*FileDescriptor, error) { + return wrapFile(d, noopCache{}) +} + +func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescriptor, error) { + fdp := protoutil.ProtoFromFileDescriptor(d) + return convertFile(d, fdp, cache) +} + +// WrapMessage wraps the given message descriptor, returning a *desc.MessageDescriptor +// value that represents the same message. +func WrapMessage(d protoreflect.MessageDescriptor) (*MessageDescriptor, error) { + return wrapMessage(d, noopCache{}) +} + +func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*MessageDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + switch p := parent.(type) { + case *FileDescriptor: + return p.messages[d.Index()], nil + case *MessageDescriptor: + return p.nested[d.Index()], nil + default: + return nil, fmt.Errorf("message has unexpected parent type: %T", parent) + } +} + +// WrapField wraps the given field descriptor, returning a *desc.FieldDescriptor +// value that represents the same field. +func WrapField(d protoreflect.FieldDescriptor) (*FieldDescriptor, error) { + return wrapField(d, noopCache{}) +} + +func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + switch p := parent.(type) { + case *FileDescriptor: + return p.extensions[d.Index()], nil + case *MessageDescriptor: + if d.IsExtension() { + return p.extensions[d.Index()], nil + } + return p.fields[d.Index()], nil + default: + return nil, fmt.Errorf("field has unexpected parent type: %T", parent) + } +} + +// WrapOneOf wraps the given oneof descriptor, returning a *desc.OneOfDescriptor +// value that represents the same oneof. +func WrapOneOf(d protoreflect.OneofDescriptor) (*OneOfDescriptor, error) { + return wrapOneOf(d, noopCache{}) +} + +func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*MessageDescriptor); ok { + return p.oneOfs[d.Index()], nil + } + return nil, fmt.Errorf("oneof has unexpected parent type: %T", parent) +} + +// WrapEnum wraps the given enum descriptor, returning a *desc.EnumDescriptor +// value that represents the same enum. +func WrapEnum(d protoreflect.EnumDescriptor) (*EnumDescriptor, error) { + return wrapEnum(d, noopCache{}) +} + +func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + switch p := parent.(type) { + case *FileDescriptor: + return p.enums[d.Index()], nil + case *MessageDescriptor: + return p.enums[d.Index()], nil + default: + return nil, fmt.Errorf("enum has unexpected parent type: %T", parent) + } +} + +// WrapEnumValue wraps the given enum value descriptor, returning a *desc.EnumValueDescriptor +// value that represents the same enum value. 
+func WrapEnumValue(d protoreflect.EnumValueDescriptor) (*EnumValueDescriptor, error) { + return wrapEnumValue(d, noopCache{}) +} + +func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (*EnumValueDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*EnumDescriptor); ok { + return p.values[d.Index()], nil + } + return nil, fmt.Errorf("enum value has unexpected parent type: %T", parent) +} + +// WrapService wraps the given service descriptor, returning a *desc.ServiceDescriptor +// value that represents the same service. +func WrapService(d protoreflect.ServiceDescriptor) (*ServiceDescriptor, error) { + return wrapService(d, noopCache{}) +} + +func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*ServiceDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*FileDescriptor); ok { + return p.services[d.Index()], nil + } + return nil, fmt.Errorf("service has unexpected parent type: %T", parent) +} + +// WrapMethod wraps the given method descriptor, returning a *desc.MethodDescriptor +// value that represents the same method. +func WrapMethod(d protoreflect.MethodDescriptor) (*MethodDescriptor, error) { + return wrapMethod(d, noopCache{}) +} + +func wrapMethod(d protoreflect.MethodDescriptor, cache descriptorCache) (*MethodDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*ServiceDescriptor); ok { + return p.methods[d.Index()], nil + } + return nil, fmt.Errorf("method has unexpected parent type: %T", parent) +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go new file mode 100644 index 00000000..39e077a4 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go @@ -0,0 +1,193 @@ +package dynamic + +// Binary serialization and de-serialization for dynamic messages + +import ( + "fmt" + "io" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/codec" +) + +// defaultDeterminism, if true, will mean that calls to Marshal will produce +// deterministic output. This is used to make the output of proto.Marshal(...) +// deterministic (since there is no way to have that convey determinism intent). +// **This is only used from tests.** +var defaultDeterminism = false + +// Marshal serializes this message to bytes, returning an error if the operation +// fails. The resulting bytes are in the standard protocol buffer binary format. +func (m *Message) Marshal() ([]byte, error) { + var b codec.Buffer + b.SetDeterministic(defaultDeterminism) + if err := m.marshal(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a +// new byte slice to marshal into, it uses the provided byte slice. The backing array +// for the returned byte slice *may* be the same as the one that was passed in, but +// it's not guaranteed as a new backing array will automatically be allocated if +// more bytes need to be written than the provided buffer has capacity for. 
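+//
+// An illustrative use (not from this change), reusing one scratch buffer
+// across messages:
+//
+//	buf := make([]byte, 0, 4096)
+//	for _, msg := range msgs {
+//		b, err := msg.MarshalAppend(buf[:0])
+//		if err != nil {
+//			return err
+//		}
+//		// consume b before the next iteration reuses the buffer
+//	}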
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) { + codedBuf := codec.NewBuffer(b) + codedBuf.SetDeterministic(defaultDeterminism) + if err := m.marshal(codedBuf); err != nil { + return nil, err + } + return codedBuf.Bytes(), nil +} + +// MarshalDeterministic serializes this message to bytes in a deterministic way, +// returning an error if the operation fails. This differs from Marshal in that +// map keys will be sorted before serializing to bytes. The protobuf spec does +// not define ordering for map entries, so Marshal will use standard Go map +// iteration order (which will be random). But for cases where determinism is +// more important than performance, use this method instead. +func (m *Message) MarshalDeterministic() ([]byte, error) { + var b codec.Buffer + b.SetDeterministic(true) + if err := m.marshal(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// MarshalAppendDeterministic behaves exactly the same as MarshalDeterministic, +// except instead of allocating a new byte slice to marshal into, it uses the +// provided byte slice. The backing array for the returned byte slice *may* be +// the same as the one that was passed in, but it's not guaranteed as a new +// backing array will automatically be allocated if more bytes need to be written +// than the provided buffer has capacity for. +func (m *Message) MarshalAppendDeterministic(b []byte) ([]byte, error) { + codedBuf := codec.NewBuffer(b) + codedBuf.SetDeterministic(true) + if err := m.marshal(codedBuf); err != nil { + return nil, err + } + return codedBuf.Bytes(), nil +} + +func (m *Message) marshal(b *codec.Buffer) error { + if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() { + return fmt.Errorf("%s is a message set; marshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName()) + } + if err := m.marshalKnownFields(b); err != nil { + return err + } + return m.marshalUnknownFields(b) +} + +func (m *Message) marshalKnownFields(b *codec.Buffer) error { + for _, tag := range m.knownFieldTags() { + itag := int32(tag) + val := m.values[itag] + fd := m.FindFieldDescriptor(itag) + if fd == nil { + panic(fmt.Sprintf("Couldn't find field for tag %d", itag)) + } + if err := b.EncodeFieldValue(fd, val); err != nil { + return err + } + } + return nil +} + +func (m *Message) marshalUnknownFields(b *codec.Buffer) error { + for _, tag := range m.unknownFieldTags() { + itag := int32(tag) + sl := m.unknownFields[itag] + for _, u := range sl { + if err := b.EncodeTagAndWireType(itag, u.Encoding); err != nil { + return err + } + switch u.Encoding { + case proto.WireBytes: + if err := b.EncodeRawBytes(u.Contents); err != nil { + return err + } + case proto.WireStartGroup: + _, _ = b.Write(u.Contents) + if err := b.EncodeTagAndWireType(itag, proto.WireEndGroup); err != nil { + return err + } + case proto.WireFixed32: + if err := b.EncodeFixed32(u.Value); err != nil { + return err + } + case proto.WireFixed64: + if err := b.EncodeFixed64(u.Value); err != nil { + return err + } + case proto.WireVarint: + if err := b.EncodeVarint(u.Value); err != nil { + return err + } + default: + return codec.ErrBadWireType + } + } + } + return nil +} + +// Unmarshal de-serializes the message that is present in the given bytes into +// this message. It first resets the current message. It returns an error if the +// given bytes do not contain a valid encoding of this message type. 
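+//
+// An illustrative round trip (md is an assumed *desc.MessageDescriptor and
+// data an assumed []byte, neither from this change):
+//
+//	dm := dynamic.NewMessage(md)
+//	if err := dm.Unmarshal(data); err != nil {
+//		return err
+//	}
+//	out, err := dm.Marshal() // re-serializes, preserving unknown fields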
+func (m *Message) Unmarshal(b []byte) error { + m.Reset() + if err := m.UnmarshalMerge(b); err != nil { + return err + } + return m.Validate() +} + +// UnmarshalMerge de-serializes the message that is present in the given bytes +// into this message. Unlike Unmarshal, it does not first reset the message, +// instead merging the data in the given bytes into the existing data in this +// message. +func (m *Message) UnmarshalMerge(b []byte) error { + return m.unmarshal(codec.NewBuffer(b), false) +} + +func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error { + if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() { + return fmt.Errorf("%s is a message set; unmarshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName()) + } + for !buf.EOF() { + fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf) + if err != nil { + if err == codec.ErrWireTypeEndGroup { + if isGroup { + // finished parsing group + return nil + } + return codec.ErrBadWireType + } + return err + } + + if fd == nil { + if m.unknownFields == nil { + m.unknownFields = map[int32][]UnknownField{} + } + uv := val.(codec.UnknownField) + u := UnknownField{ + Encoding: uv.Encoding, + Value: uv.Value, + Contents: uv.Contents, + } + m.unknownFields[uv.Tag] = append(m.unknownFields[uv.Tag], u) + } else if err := mergeField(m, fd, val); err != nil { + return err + } + } + if isGroup { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go new file mode 100644 index 00000000..5d7f45e4 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go @@ -0,0 +1,159 @@ +// Package dynamic provides an implementation for a dynamic protobuf message. +// +// The dynamic message is essentially a message descriptor along with a map of +// tag numbers to values. It has a broad API for interacting with the message, +// including inspection and modification. Generally, most operations have two +// forms: a regular method that panics on bad input or error and a "Try" form +// of the method that will instead return an error. +// +// A dynamic message can optionally be constructed with a MessageFactory. The +// MessageFactory has various registries that may be used by the dynamic message, +// such as during de-serialization. The message factory is "inherited" by any +// other dynamic messages created, such as nested messages that are created +// during de-serialization. Similarly, any dynamic message created using +// MessageFactory.NewMessage will be associated with that factory, which in turn +// will be used to create other messages or parse extension fields during +// de-serialization. +// +// # Field Types +// +// The types of values expected by setters and returned by getters are the +// same as protoc generates for scalar fields. For repeated fields, there are +// methods for getting and setting values at a particular index or for adding +// an element. Similarly, for map fields, there are methods for getting and +// setting values for a particular key. +// +// If you use GetField for a repeated field, it will return a copy of all +// elements as a slice []interface{}. Similarly, using GetField for a map field +// will return a copy of all mappings as a map[interface{}]interface{}. You can +// also use SetField to supply an entire slice or map for repeated or map fields. 
+// The slice need not be []interface{} but can actually be typed according to
+// the field's expected type. For example, a repeated uint64 field can be set
+// using a slice of type []uint64.
+//
+// Descriptors for map fields describe them as repeated fields with a nested
+// message type. The nested message type is a special generated type that
+// represents a single mapping: a key and value pair. The dynamic message has some
+// special affordances for this representation. For example, you can use
+// SetField to set a map field using a slice of these entry messages. Internally,
+// the slice of entries will be converted to an actual map. Similarly, you can
+// use AddRepeatedField with an entry message to add (or overwrite) a mapping.
+// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps,
+// since those take numeric index arguments which are not relevant to maps
+// (since maps in Go have no defined ordering).
+//
+// When setting field values in dynamic messages, the type-checking is lenient
+// in that it accepts any named type with the right kind. So a string field can
+// be assigned to any type that is defined as a string. Enum fields require
+// int32 values (or any type that is defined as an int32).
+//
+// Unlike normal use of numeric values in Go, values will be automatically
+// widened when assigned. So, for example, an int64 field can be set using an
+// int32 value since it can be safely widened without truncation or loss of
+// precision. The same goes for uint32 values being converted to uint64 and
+// float32 being converted to float64. Narrowing conversions are not done,
+// however. Also, unsigned values will never be automatically converted to
+// signed (and vice versa), and floating point values will never be
+// automatically converted to integral values (and vice versa). Since the bit
+// width of int and uint fields is allowed to be platform dependent, but will
+// always be less than or equal to 64, they can only be used as values for
+// int64 and uint64 fields, respectively. They cannot be used to set int32 or
+// uint32 fields, which includes enum fields.
+//
+// Fields whose type is a nested message can have values set to either other
+// dynamic messages or generated messages (e.g. pointers to structs generated by
+// protoc). Getting a value for such a field will return the actual type it is
+// set to (e.g. either a dynamic message or a generated message). If the value
+// is not set and the message uses proto2 syntax, the default message returned
+// will be whatever is returned by the dynamic message's MessageFactory (if the
+// dynamic message was not created with a factory, it will use the logic of the
+// zero value factory). In most typical cases, it will return a dynamic message,
+// but if the factory is configured with a KnownTypeRegistry, or if the field's
+// type is a well-known type, it will return a zero value generated message.
+//
+// # Unrecognized Fields
+//
+// Unrecognized fields are preserved by the dynamic message when unmarshaling
+// from the standard binary format. If the message's MessageFactory was
+// configured with an ExtensionRegistry, it will be used to identify and parse
+// extension fields for the message.
+//
+// Unrecognized fields can dynamically become recognized fields if the
+// application attempts to retrieve an unrecognized field's value using a
+// FieldDescriptor.
In this case, the given FieldDescriptor is used to parse the +// unknown field and move the parsed value into the message's set of known +// fields. This behavior is most suited to the use of extensions, where an +// ExtensionRegistry is not setup with all known extensions ahead of time. But +// it can even happen for non-extension fields! Here's an example scenario where +// a non-extension field can initially be unknown and become known: +// +// 1. A dynamic message is created with a descriptor, A, and then +// de-serialized from a stream of bytes. The stream includes an +// unrecognized tag T. The message will include tag T in its unrecognized +// field set. +// 2. Another call site retrieves a newer descriptor, A', which includes a +// newly added field with tag T. +// 3. That other call site then uses a FieldDescriptor to access the value of +// the new field. This will cause the dynamic message to parse the bytes +// for the unknown tag T and store them as a known field. +// 4. Subsequent operations for tag T, including setting the field using only +// tag number or de-serializing a stream that includes tag T, will operate +// as if that tag were part of the original descriptor, A. +// +// # Compatibility +// +// In addition to implementing the proto.Message interface, the included +// Message type also provides an XXX_MessageName() method, so it can work with +// proto.MessageName. And it provides a Descriptor() method that behaves just +// like the method of the same signature in messages generated by protoc. +// Because of this, it is actually compatible with proto.Message in many (though +// not all) contexts. In particular, it is compatible with proto.Marshal and +// proto.Unmarshal for serializing and de-serializing messages. +// +// The dynamic message supports binary and text marshaling, using protobuf's +// well-defined binary format and the same text format that protoc-generated +// types use. It also supports JSON serialization/de-serialization by +// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic +// messages can safely be used with the jsonpb package for JSON serialization +// and de-serialization. +// +// In addition to implementing the proto.Message interface and numerous related +// methods, it also provides inter-op with generated messages via conversion. +// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message +// contents from a dynamic message to a generated message and vice versa. +// +// When copying from a generated message into a dynamic message, if the +// generated message contains fields unknown to the dynamic message (e.g. not +// present in the descriptor used to create the dynamic message), these fields +// become known to the dynamic message (as per behavior described above in +// "Unrecognized Fields"). If the generated message has unrecognized fields of +// its own, including unrecognized extensions, they are preserved in the dynamic +// message. It is possible that the dynamic message knows about fields that the +// generated message did not, like if it has a different version of the +// descriptor or its MessageFactory has an ExtensionRegistry that knows about +// different extensions than were linked into the program. In this case, these +// unrecognized fields in the generated message will be known fields in the +// dynamic message. 
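+//
+// For example (an illustrative sketch; foopb.Foo is a hypothetical generated
+// type and md is its message descriptor, neither from this change):
+//
+//	dm := dynamic.NewMessage(md)
+//	dm.SetFieldByName("name", "x")
+//	var gen foopb.Foo
+//	if err := dm.ConvertTo(&gen); err != nil {
+//		// handle error
+//	}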
+//
+// Similarly, when copying from a dynamic message into a generated message, if
+// the dynamic message has unrecognized fields they can be preserved in the
+// generated message (currently only for syntax proto2 since proto3 generated
+// messages do not preserve unrecognized fields). If the generated message knows
+// about fields that the dynamic message does not, these unrecognized fields may
+// become known fields in the generated message.
+//
+// # Registries
+//
+// This package also contains a couple of registries, for managing known types
+// and descriptors.
+//
+// The KnownTypeRegistry allows de-serialization of a dynamic message to use
+// generated message types, instead of dynamic messages, for some kinds of
+// nested message fields. This is particularly useful for working with proto
+// messages that have special encodings as JSON (e.g. the well-known types),
+// since the dynamic message does not try to handle these special cases in its
+// JSON marshaling facilities.
+//
+// The ExtensionRegistry allows for recognizing and parsing extension fields
+// (for proto2 messages).
+package dynamic
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
new file mode 100644
index 00000000..ff136b0e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -0,0 +1,2830 @@
+package dynamic
+
+import (
+	"bytes"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	protov2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/internal"
+)
+
+// ErrUnknownTagNumber is an error that is returned when an operation refers
+// to an unknown tag number.
+var ErrUnknownTagNumber = errors.New("unknown tag number")
+
+// UnknownTagNumberError is the same as ErrUnknownTagNumber.
+// Deprecated: use ErrUnknownTagNumber
+var UnknownTagNumberError = ErrUnknownTagNumber
+
+// ErrUnknownFieldName is an error that is returned when an operation refers
+// to an unknown field name.
+var ErrUnknownFieldName = errors.New("unknown field name")
+
+// UnknownFieldNameError is the same as ErrUnknownFieldName.
+// Deprecated: use ErrUnknownFieldName
+var UnknownFieldNameError = ErrUnknownFieldName
+
+// ErrFieldIsNotMap is an error that is returned when map-related operations
+// are attempted with fields that are not maps.
+var ErrFieldIsNotMap = errors.New("field is not a map type")
+
+// FieldIsNotMapError is the same as ErrFieldIsNotMap.
+// Deprecated: use ErrFieldIsNotMap
+var FieldIsNotMapError = ErrFieldIsNotMap
+
+// ErrFieldIsNotRepeated is an error that is returned when repeated field
+// operations are attempted with fields that are not repeated.
+var ErrFieldIsNotRepeated = errors.New("field is not repeated")
+
+// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated.
+// Deprecated: use ErrFieldIsNotRepeated
+var FieldIsNotRepeatedError = ErrFieldIsNotRepeated
+
+// ErrIndexOutOfRange is an error that is returned when an invalid index is
+// provided when accessing a single element of a repeated field.
+var ErrIndexOutOfRange = errors.New("index is out of range")
+
+// IndexOutOfRangeError is the same as ErrIndexOutOfRange.
+// Deprecated: use ErrIndexOutOfRange
+var IndexOutOfRangeError = ErrIndexOutOfRange
+
+// ErrNumericOverflow is an error returned by operations that encounter a
+// numeric value that is too large, for example de-serializing a value into an
+// int32 field when the value is larger than can fit into a 32-bit value.
+var ErrNumericOverflow = errors.New("numeric value is out of range")
+
+// NumericOverflowError is the same as ErrNumericOverflow.
+// Deprecated: use ErrNumericOverflow
+var NumericOverflowError = ErrNumericOverflow
+
+var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil))
+var typeOfBytes = reflect.TypeOf(([]byte)(nil))
+
+// Message is a dynamic protobuf message. Instead of a generated struct,
+// like most protobuf messages, this is a map of field number to values and
+// a message descriptor, which is used to validate the field values and
+// also to de-serialize messages (from the standard binary format, as well
+// as from the text format and from JSON).
+type Message struct {
+	md            *desc.MessageDescriptor
+	er            *ExtensionRegistry
+	mf            *MessageFactory
+	extraFields   map[int32]*desc.FieldDescriptor
+	values        map[int32]interface{}
+	unknownFields map[int32][]UnknownField
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+	// Encoding indicates how the unknown field was encoded on the wire. If it
+	// is proto.WireBytes or proto.WireStartGroup then Contents will be set to
+	// the raw bytes. If it is proto.WireFixed32 then the data is in the least
+	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+	// Value.
+	Encoding int8
+	Contents []byte
+	Value    uint64
+}
+
+// NewMessage creates a new dynamic message for the type represented by the given
+// message descriptor. During de-serialization, a default MessageFactory is used to
+// instantiate any nested message fields and no extension fields will be parsed. To
+// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage.
+func NewMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, nil)
+}
+
+// NewMessageWithExtensionRegistry creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// ExtensionRegistry is used to parse extension fields and nested messages will be
+// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er).
+func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message {
+	mf := NewMessageFactoryWithExtensionRegistry(er)
+	return NewMessageWithMessageFactory(md, mf)
+}
+
+// NewMessageWithMessageFactory creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// MessageFactory is used to instantiate nested messages.
+func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message {
+	var er *ExtensionRegistry
+	if mf != nil {
+		er = mf.er
+	}
+	return &Message{
+		md: md,
+		mf: mf,
+		er: er,
+	}
+}
+
+// AsDynamicMessage converts the given message to a dynamic message. If the
+// given message is dynamic, it is returned. Otherwise, a dynamic message is
+// created using NewMessage.
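+//
+// An illustrative call (foopb.Foo is a hypothetical generated type, not from
+// this change):
+//
+//	dm, err := dynamic.AsDynamicMessage(&foopb.Foo{Name: "x"})
+//	if err == nil {
+//		fmt.Println(dm.GetFieldByName("name")) // "x"
+//	}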
+func AsDynamicMessage(msg proto.Message) (*Message, error) { + return AsDynamicMessageWithMessageFactory(msg, nil) +} + +// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic +// message. If the given message is dynamic, it is returned. Otherwise, a +// dynamic message is created using NewMessageWithExtensionRegistry. +func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) { + mf := NewMessageFactoryWithExtensionRegistry(er) + return AsDynamicMessageWithMessageFactory(msg, mf) +} + +// AsDynamicMessageWithMessageFactory converts the given message to a dynamic +// message. If the given message is dynamic, it is returned. Otherwise, a +// dynamic message is created using NewMessageWithMessageFactory. +func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) { + if dm, ok := msg.(*Message); ok { + return dm, nil + } + md, err := desc.LoadMessageDescriptorForMessage(msg) + if err != nil { + return nil, err + } + dm := NewMessageWithMessageFactory(md, mf) + err = dm.mergeFrom(msg) + if err != nil { + return nil, err + } + return dm, nil +} + +// GetMessageDescriptor returns a descriptor for this message's type. +func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor { + return m.md +} + +// GetKnownFields returns a slice of descriptors for all known fields. The +// fields will not be in any defined order. +func (m *Message) GetKnownFields() []*desc.FieldDescriptor { + if len(m.extraFields) == 0 { + return m.md.GetFields() + } + flds := make([]*desc.FieldDescriptor, len(m.md.GetFields()), len(m.md.GetFields())+len(m.extraFields)) + copy(flds, m.md.GetFields()) + for _, fld := range m.extraFields { + if !fld.IsExtension() { + flds = append(flds, fld) + } + } + return flds +} + +// GetKnownExtensions returns a slice of descriptors for all extensions known by +// the message's extension registry. The fields will not be in any defined order. +func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor { + if !m.md.IsExtendable() { + return nil + } + exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName()) + for _, fld := range m.extraFields { + if fld.IsExtension() { + exts = append(exts, fld) + } + } + return exts +} + +// GetUnknownFields returns a slice of tag numbers for all unknown fields that +// this message contains. The tags will not be in any defined order. +func (m *Message) GetUnknownFields() []int32 { + flds := make([]int32, 0, len(m.unknownFields)) + for tag := range m.unknownFields { + flds = append(flds, tag) + } + return flds +} + +// Descriptor returns the serialized form of the file descriptor in which the +// message was defined and a path to the message type therein. This mimics the +// method of the same name on message types generated by protoc. 
+func (m *Message) Descriptor() ([]byte, []int) { + // get encoded file descriptor + b, err := proto.Marshal(m.md.GetFile().AsProto()) + if err != nil { + panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err)) + } + var zippedBytes bytes.Buffer + w := gzip.NewWriter(&zippedBytes) + if _, err := w.Write(b); err != nil { + panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err)) + } + if err := w.Close(); err != nil { + panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err)) + } + + // and path to message + path := []int{} + var d desc.Descriptor + name := m.md.GetFullyQualifiedName() + for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() { + found := false + switch d := d.(type) { + case (*desc.FileDescriptor): + for i, md := range d.GetMessageTypes() { + if md.GetFullyQualifiedName() == name { + found = true + path = append(path, i) + } + } + case (*desc.MessageDescriptor): + for i, md := range d.GetNestedMessageTypes() { + if md.GetFullyQualifiedName() == name { + found = true + path = append(path, i) + } + } + } + if !found { + panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName())) + } + } + // reverse the path + i := 0 + j := len(path) - 1 + for i < j { + path[i], path[j] = path[j], path[i] + i++ + j-- + } + + return zippedBytes.Bytes(), path +} + +// XXX_MessageName returns the fully qualified name of this message's type. This +// allows dynamic messages to be used with proto.MessageName. +func (m *Message) XXX_MessageName() string { + return m.md.GetFullyQualifiedName() +} + +// FindFieldDescriptor returns a field descriptor for the given tag number. This +// searches known fields in the descriptor, known fields discovered during calls +// to GetField or SetField, and extension fields known by the message's extension +// registry. It returns nil if the tag is unknown. +func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor { + fd := m.md.FindFieldByNumber(tagNumber) + if fd != nil { + return fd + } + fd = m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber) + if fd != nil { + return fd + } + return m.extraFields[tagNumber] +} + +// FindFieldDescriptorByName returns a field descriptor for the given field +// name. This searches known fields in the descriptor, known fields discovered +// during calls to GetField or SetField, and extension fields known by the +// message's extension registry. It returns nil if the name is unknown. If the +// given name refers to an extension, it should be fully qualified and may be +// optionally enclosed in parentheses or brackets. 
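+//
+// Illustrative name forms (the field and extension names below are assumed,
+// not from this change):
+//
+//	fd := dm.FindFieldDescriptorByName("name")         // plain field
+//	xd := dm.FindFieldDescriptorByName("(foo.my_ext)") // extension, in parentheses
+//	yd := dm.FindFieldDescriptorByName("[foo.my_ext]") // extension, in brackets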
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() {
+			return fd
+		}
+	}
+
+	return nil
+}
+
+// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. If no field matches the given JSON name, it
+// will fall back to searching field names (i.e. FindFieldDescriptorByName). If
+// this also yields no match, nil is returned.
+func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByJSONName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() {
+			return fd
+		}
+	}
+
+	// try non-JSON names
+	return m.FindFieldDescriptorByName(name)
+}
+
+func (m *Message) checkField(fd *desc.FieldDescriptor) error {
+	return checkField(fd, m.md)
+}
+
+func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error {
+	if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() {
+		return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName())
+	}
+	if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) {
+		return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges())
+	}
+	return nil
+}
+
+// GetField returns the value for the given field descriptor. It panics if an
+// error is encountered. See TryGetField.
+func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} {
+	if v, err := m.TryGetField(fd); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetField returns the value for the given field descriptor. An error is
+// returned if the given field descriptor does not belong to the right message
+// type.
+//
+// The Go type of the returned value, for scalar fields, is the same as protoc
+// would generate for the field (in a non-dynamic message). The table below
+// lists the scalar types and the corresponding Go types.
+//
+//	+-------------------------+-----------+
+//	|      Declared Type      |  Go Type  |
+//	+-------------------------+-----------+
+//	| int32, sint32, sfixed32 | int32     |
+//	| int64, sint64, sfixed64 | int64     |
+//	| uint32, fixed32         | uint32    |
+//	| uint64, fixed64         | uint64    |
+//	| float                   | float32   |
+//	| double                  | float64   |
+//	| bool                    | bool      |
+//	| string                  | string    |
+//	| bytes                   | []byte    |
+//	+-------------------------+-----------+
+//
+// Values for enum fields will always be int32 values. You can use the enum
+// descriptor associated with the field to look up value names for those values.
+// Values for message type fields may be an instance of the generated type *or*
+// may be another *dynamic.Message that represents the type.
+//
+// If the given field is a map field, the returned type will be
+// map[interface{}]interface{}. The actual concrete types of keys and values are
+// as described above. If the given field is a (non-map) repeated field, the
+// returned type is always []interface{}; the type of the actual elements is as
+// described above.
+//
+// If this message has no value for the given field, its default value is
+// returned. If the message is defined in a file with "proto3" syntax, the
+// default is always the zero value for the field. The default value for map and
+// repeated fields is a nil map or slice (respectively). For fields whose type
+// is a message, the default value is an empty message for "proto2" syntax or a
+// nil message for "proto3" syntax. Note that in the latter case, a non-nil
+// interface with a nil pointer is returned, not a nil interface. Also note that
+// whether the returned value is an empty message or nil depends on whether
+// *this* message was defined as "proto3" syntax, not the message type referred
+// to by the field's type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be returned, or an error will
+// be returned if the unknown value cannot be parsed according to the field
+// descriptor's type information.
+func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByName returns the value for the field with the given name. It panics
+// if an error is encountered. See TryGetFieldByName.
+func (m *Message) GetFieldByName(name string) interface{} {
+	if v, err := m.TryGetFieldByName(name); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByName returns the value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByName(name string) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByNumber returns the value for the field with the given tag number.
+// It panics if an error is encountered. See TryGetFieldByNumber.
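+//
+// A hedged sketch (tag 1 and its int32 type are hypothetical; see the type
+// table above for the mapping):
+//
+//	v := m.GetFieldByNumber(1)
+//	n := v.(int32) // safe only if tag 1 is indeed an int32 field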
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} {
+	if v, err := m.TryGetFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByNumber returns the value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getField(fd)
+}
+
+func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) {
+	return m.doGetField(fd, false)
+}
+
+func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) {
+	res := m.values[fd.GetNumber()]
+	if res == nil {
+		var err error
+		if res, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		}
+		if res == nil {
+			if nilIfAbsent {
+				return nil, nil
+			} else {
+				def := fd.GetDefaultValue()
+				if def != nil {
+					return def, nil
+				}
+				// GetDefaultValue only returns nil for message types
+				md := fd.GetMessageType()
+				if m.md.IsProto3() {
+					return nilMessage(md), nil
+				} else {
+					// for proto2, return default instance of message
+					return m.mf.NewMessage(md), nil
+				}
+			}
+		}
+	}
+	rt := reflect.TypeOf(res)
+	if rt.Kind() == reflect.Map {
+		// make defensive copies to prevent caller from storing illegal keys and values
+		m := res.(map[interface{}]interface{})
+		res := map[interface{}]interface{}{}
+		for k, v := range m {
+			res[k] = v
+		}
+		return res, nil
+	} else if rt.Kind() == reflect.Slice && rt != typeOfBytes {
+		// make defensive copies to prevent caller from storing illegal elements
+		sl := res.([]interface{})
+		res := make([]interface{}, len(sl))
+		copy(res, sl)
+		return res, nil
+	}
+	return res, nil
+}
+
+func nilMessage(md *desc.MessageDescriptor) interface{} {
+	// try to return a proper nil pointer
+	msgType := proto.MessageType(md.GetFullyQualifiedName())
+	if msgType != nil && msgType.Implements(typeOfProtoMessage) {
+		return reflect.Zero(msgType).Interface().(proto.Message)
+	}
+	// fallback to nil dynamic message pointer
+	return (*Message)(nil)
+}
+
+// HasField returns true if this message has a value for the given field. If the
+// given field is not valid (e.g. belongs to a different message type), false is
+// returned. If this message is defined in a file with "proto3" syntax, this
+// will return false even if a field was explicitly assigned its zero value (the
+// zero values for a field are intentionally indistinguishable from absent).
+func (m *Message) HasField(fd *desc.FieldDescriptor) bool {
+	if err := m.checkField(fd); err != nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldName returns true if this message has a value for a field with the
+// given name. If the given name is unknown, this returns false.
+func (m *Message) HasFieldName(name string) bool {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldNumber returns true if this message has a value for a field with the
+// given tag number. If the given tag is unknown, this returns false.
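+//
+// A hedged sketch of the proto3 behavior noted for HasField (the field name
+// "count" is hypothetical and assumed to be a proto3 int32):
+//
+//	m.SetFieldByName("count", int32(0)) // zero value: indistinguishable from unset
+//	ok := m.HasFieldName("count")       // ok is false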
+func (m *Message) HasFieldNumber(tagNumber int) bool {
+	if _, ok := m.values[int32(tagNumber)]; ok {
+		return true
+	}
+	_, ok := m.unknownFields[int32(tagNumber)]
+	return ok
+}
+
+// SetField sets the value for the given field descriptor to the given value. It
+// panics if an error is encountered. See TrySetField.
+func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TrySetField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetField sets the value for the given field descriptor to the given value.
+// An error is returned if the given field descriptor does not belong to the
+// right message type or if the given value is not a correct/compatible type for
+// the given field.
+//
+// The Go type expected for a field is the same as TryGetField would return for
+// the field. So message values can be supplied as either the correct generated
+// message type or as a *dynamic.Message.
+//
+// Since it is cumbersome to work with dynamic messages, some concessions are
+// made to simplify usage regarding types:
+//
+//  1. If a numeric type is provided that can be converted *without loss or
+//     overflow*, it is accepted. This allows for setting int64 fields using int
+//     or int32 values. Similarly for uint64 with uint and uint32 values and for
+//     float64 fields with float32 values.
+//  2. The value can be a named type, as long as its underlying type is correct.
+//  3. Map and repeated fields can be set using any kind of concrete map or
+//     slice type, as long as the values within are all of the correct type. So
+//     a field defined as a 'map<string, int32>' can be set using a
+//     map[string]int32, a map[string]interface{}, or even a
+//     map[interface{}]interface{}.
+//  4. Finally, dynamic code that chooses to not treat maps as a special case
+//     will find that it can set map fields using a slice where each element is
+//     a message that matches the implicit map-entry field message type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByName sets the value for the field with the given name to the given
+// value. It panics if an error is encountered. See TrySetFieldByName.
+func (m *Message) SetFieldByName(name string, val interface{}) {
+	if err := m.TrySetFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByName sets the value for the field with the given name to the
+// given value. An error is returned if the given name is unknown or if the
+// given value has an incorrect type. If the given name refers to an extension
+// field, it should be fully qualified and optionally enclosed in parentheses
+// or brackets.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByNumber sets the value for the field with the given tag number to
+// the given value. It panics if an error is encountered. See
+// TrySetFieldByNumber.
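+//
+// A hedged sketch of the numeric leniency described for TrySetField (tag 2
+// and its int64 type are hypothetical):
+//
+//	m.SetFieldByNumber(2, 42)               // plain int converts losslessly to int64
+//	err := m.TrySetFieldByNumber(2, "oops") // wrong type: returns an error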
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TrySetFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByNumber sets the value for the field with the given tag number to
+// the given value. An error is returned if the given tag is unknown or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.setField(fd, val)
+}
+
+func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error {
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+	m.internalSetField(fd, val)
+	return nil
+}
+
+func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) {
+	if fd.IsRepeated() {
+		// Unset fields and zero-length fields are indistinguishable, in both
+		// proto2 and proto3 syntax
+		if reflect.ValueOf(val).Len() == 0 {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	} else if m.md.IsProto3() && fd.GetOneOf() == nil {
+		// proto3 considers fields that are set to their zero value as unset
+		// (we already handled repeated fields above)
+		var equal bool
+		if b, ok := val.([]byte); ok {
+			// can't compare slices, so we have to special-case []byte values
+			equal = ok && bytes.Equal(b, fd.GetDefaultValue().([]byte))
+		} else {
+			defVal := fd.GetDefaultValue()
+			equal = defVal == val
+			if !equal && defVal == nil {
+				// above just checks if value is the nil interface,
+				// but we should also test if the given value is a
+				// nil pointer
+				rv := reflect.ValueOf(val)
+				if rv.Kind() == reflect.Ptr && rv.IsNil() {
+					equal = true
+				}
+			}
+		}
+		if equal {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	}
+	if m.values == nil {
+		m.values = map[int32]interface{}{}
+	}
+	m.values[fd.GetNumber()] = val
+	// if this field is part of a one-of, make sure all other one-of choices are cleared
+	od := fd.GetOneOf()
+	if od != nil {
+		for _, other := range od.GetChoices() {
+			if other.GetNumber() != fd.GetNumber() {
+				delete(m.values, other.GetNumber())
+			}
+		}
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+func (m *Message) addField(fd *desc.FieldDescriptor) {
+	if m.extraFields == nil {
+		m.extraFields = map[int32]*desc.FieldDescriptor{}
+	}
+	m.extraFields[fd.GetNumber()] = fd
+}
+
+// ClearField removes any value for the given field. It panics if an error is
+// encountered. See TryClearField.
+func (m *Message) ClearField(fd *desc.FieldDescriptor) {
+	if err := m.TryClearField(fd); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearField removes any value for the given field. An error is returned if
+// the given field descriptor does not belong to the right message type.
+func (m *Message) TryClearField(fd *desc.FieldDescriptor) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByName removes any value for the field with the given name. It
+// panics if an error is encountered. See TryClearFieldByName.
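+//
+// A hedged sketch (the field name "note" is hypothetical):
+//
+//	m.SetFieldByName("note", "hello")
+//	m.ClearFieldByName("note")
+//	// m.HasFieldName("note") now reports false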
+func (m *Message) ClearFieldByName(name string) {
+	if err := m.TryClearFieldByName(name); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByName removes any value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+func (m *Message) TryClearFieldByName(name string) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByNumber removes any value for the field with the given tag number.
+// It panics if an error is encountered. See TryClearFieldByNumber.
+func (m *Message) ClearFieldByNumber(tagNumber int) {
+	if err := m.TryClearFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByNumber removes any value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+func (m *Message) TryClearFieldByNumber(tagNumber int) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+func (m *Message) clearField(fd *desc.FieldDescriptor) {
+	// clear value
+	if m.values != nil {
+		delete(m.values, fd.GetNumber())
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+// GetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. It panics if an error is encountered. See
+// TryGetOneOfField.
+func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) {
+	if fd, val, err := m.TryGetOneOfField(od); err != nil {
+		panic(err.Error())
+	} else {
+		return fd, val
+	}
+}
+
+// TryGetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. An error is returned if the given one-of belongs to the
+// wrong message type. If the given one-of has no field set, this method will
+// return nil, nil.
+//
+// The type of the value, if one is set, is the same as would be returned by
+// TryGetField using the returned field descriptor.
+//
+// Like with TryGetField, if the given one-of contains any fields that are not
+// known (e.g. not present in this message's descriptor), they will become known
+// and any unknown value will be parsed (and become a known value on success).
+func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		val, err := m.doGetField(fd, true)
+		if err != nil {
+			return nil, nil, err
+		}
+		if val != nil {
+			return fd, val, nil
+		}
+	}
+	return nil, nil, nil
+}
+
+// ClearOneOfField removes any value for any of the given one-of's fields. It
+// panics if an error is encountered. See TryClearOneOfField.
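+//
+// A hedged sketch (assumes the message's descriptor declares at least one
+// one-of; GetOneOfs is an accessor from the desc package):
+//
+//	od := m.GetMessageDescriptor().GetOneOfs()[0]
+//	if fd, val, _ := m.TryGetOneOfField(od); fd != nil {
+//		_ = val // the currently-set choice
+//	}
+//	m.ClearOneOfField(od) // afterwards no choice is set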
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) {
+	if err := m.TryClearOneOfField(od); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearOneOfField removes any value for any of the given one-of's fields. An
+// error is returned if the given one-of descriptor does not belong to the right
+// message type.
+func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		m.clearField(fd)
+	}
+	return nil
+}
+
+// GetMapField returns the value for the given map field descriptor and given
+// key. It panics if an error is encountered. See TryGetMapField.
+func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} {
+	if v, err := m.TryGetMapField(fd, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapField returns the value for the given map field descriptor and given
+// key. An error is returned if the given field descriptor does not belong to
+// the right message type or if it is not a map field.
+//
+// If the map field does not contain the requested key, this method returns
+// nil, nil. The Go type of the value returned mirrors the type that protoc
+// would generate for the field. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getMapField(fd, key)
+}
+
+// GetMapFieldByName returns the value for the map field with the given name and
+// given key. It panics if an error is encountered. See TryGetMapFieldByName.
+func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByName returns the value for the map field with the given name
+// and given key. An error is returned if the given name is unknown or if it
+// names a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getMapField(fd, key)
+}
+
+// GetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. It panics if an error is encountered. See
+// TryGetMapFieldByNumber.
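+//
+// A hedged sketch (tag 3 and its map<string, int32> type are hypothetical):
+//
+//	v := m.GetMapFieldByNumber(3, "answer")
+//	// v is nil if the key is absent; otherwise an int32 per the value type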
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. An error is returned if the given tag is unknown or if
+// it indicates a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getMapField(fd, key)
+}
+
+func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if !fd.IsMap() {
+		return nil, FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key, false)
+	if err != nil {
+		return nil, err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if mp == nil {
+			return nil, nil
+		}
+	}
+	return mp.(map[interface{}]interface{})[ki], nil
+}
+
+// ForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. It stops iteration if the function
+// returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntry.
+func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntry(fd, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. An error is returned if the given field
+// descriptor does not belong to the right message type or if it is not a map
+// field.
+//
+// Iteration ends either when all entries have been examined or when the given
+// function returns false. So the function is expected to return true for normal
+// iteration and false to break out. If this message has no value for the given
+// field, it returns without invoking the given function.
+//
+// The Go type of the key and value supplied to the function mirrors the type
+// that protoc would generate for the field. (See TryGetField for more details
+// on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value's entries will then be iterated as
+// described above. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByName executes the given function for each entry in the
+// map value for the field with the given name. It stops iteration if the
+// function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByName.
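+//
+// A hedged usage sketch (the map field name "labels" is hypothetical):
+//
+//	m.ForEachMapFieldEntryByName("labels", func(k, v interface{}) bool {
+//		fmt.Printf("%v=%v\n", k, v)
+//		return true // keep iterating
+//	})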
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByName executes the given function for each entry in
+// the map value for the field with the given name. It stops iteration if the
+// function returns false. An error is returned if the given name is unknown or
+// if it names a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByNumber.
+func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. An error is returned if the given tag is unknown
+// or if it indicates a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		// assign to the outer mp (not a shadowed copy) so that entries parsed
+		// from an unknown field are actually iterated below
+		var err error
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	for k, v := range mp.(map[interface{}]interface{}) {
+		if !fn(k, v) {
+			break
+		}
+	}
+	return nil
+}
+
+// PutMapField sets the value for the given map field descriptor and given key
+// to the given value. It panics if an error is encountered. See TryPutMapField.
+func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) {
+	if err := m.TryPutMapField(fd, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapField sets the value for the given map field descriptor and given
+// key to the given value. An error is returned if the given field descriptor
+// does not belong to the right message type, if the given field is not a map
+// field, or if the given value is not a correct/compatible type for the given
+// field.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a field with the same type as the map's value type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByName sets the value for the map field with the given name and
+// given key to the given value. It panics if an error is encountered. See
+// TryPutMapFieldByName.
+func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByName(name, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByName sets the value for the map field with the given name and
+// the given key to the given value. An error is returned if the given name is
+// unknown, if it names a field that is not a map, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByNumber sets the value for the map field with the given tag
+// number and given key to the given value. It panics if an error is
+// encountered. See TryPutMapFieldByNumber.
+func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByNumber sets the value for the map field with the given tag
+// number and the given key to the given value. An error is returned if the
+// given tag is unknown, if it indicates a field that is not a map, or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key, false)
+	if err != nil {
+		return err
+	}
+	vfd := fd.GetMessageType().GetFields()[1]
+	vi, err := validElementFieldValue(vfd, val, true)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			m.internalSetField(fd, map[interface{}]interface{}{ki: vi})
+			return nil
+		}
+	}
+	mp.(map[interface{}]interface{})[ki] = vi
+	return nil
+}
+
+// RemoveMapField changes the value for the given field descriptor by removing
+// any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapField.
+func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) {
+	if err := m.TryRemoveMapField(fd, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapField changes the value for the given field descriptor by
+// removing any value associated with the given key. An error is returned if the
+// given field descriptor does not belong to the right message type or if the
+// given field is not a map field.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and any value for the given key
+// removed.
+func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByName changes the value for the field with the given name by
+// removing any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapFieldByName.
+func (m *Message) RemoveMapFieldByName(name string, key interface{}) {
+	if err := m.TryRemoveMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByName changes the value for the field with the given name
+// by removing any value associated with the given key. An error is returned if
+// the given name is unknown or if it names a field that is not a map.
+func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. It panics if an
+// error is encountered. See TryRemoveMapFieldByNumber.
+func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) {
+	if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. An error is
+// returned if the given tag is unknown or if it indicates a field that is not
+// a map.
+func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.removeMapField(fd, key)
+}
+
+func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key, false)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	res := mp.(map[interface{}]interface{})
+	delete(res, ki)
+	if len(res) == 0 {
+		delete(m.values, fd.GetNumber())
+	}
+	return nil
+}
+
+// FieldLength returns the number of elements in this message for the given
+// field descriptor. It panics if an error is encountered. See TryFieldLength.
+func (m *Message) FieldLength(fd *desc.FieldDescriptor) int {
+	l, err := m.TryFieldLength(fd)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLength returns the number of elements in this message for the given
+// field descriptor. An error is returned if the given field descriptor does not
+// belong to the right message type or if it is neither a map field nor a
+// repeated field.
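+//
+// A hedged sketch (the field name "tags" is hypothetical and assumed to
+// resolve; the same call works for map and repeated fields):
+//
+//	fd := m.FindFieldDescriptorByName("tags")
+//	if n, err := m.TryFieldLength(fd); err == nil {
+//		_ = n // element count; 0 when the field is unset
+//	}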
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if err := m.checkField(fd); err != nil {
+		return 0, err
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByName returns the number of elements in this message for the
+// field with the given name. It panics if an error is encountered. See
+// TryFieldLengthByName.
+func (m *Message) FieldLengthByName(name string) int {
+	l, err := m.TryFieldLengthByName(name)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByName returns the number of elements in this message for the
+// field with the given name. An error is returned if the given name is unknown
+// or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByName(name string) (int, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return 0, UnknownFieldNameError
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. It panics if an error is encountered. See
+// TryFieldLengthByNumber.
+func (m *Message) FieldLengthByNumber(tagNumber int32) int {
+	l, err := m.TryFieldLengthByNumber(tagNumber)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. An error is returned if the given tag is
+// unknown or if the indicated field is neither a map field nor a repeated
+// field.
+func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) {
+	fd := m.FindFieldDescriptor(tagNumber)
+	if fd == nil {
+		return 0, UnknownTagNumberError
+	}
+	return m.fieldLength(fd)
+}
+
+func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if !fd.IsRepeated() {
+		return 0, FieldIsNotRepeatedError
+	}
+	val := m.values[fd.GetNumber()]
+	if val == nil {
+		var err error
+		if val, err = m.parseUnknownField(fd); err != nil {
+			return 0, err
+		} else if val == nil {
+			return 0, nil
+		}
+	}
+	if sl, ok := val.([]interface{}); ok {
+		return len(sl), nil
+	} else if mp, ok := val.(map[interface{}]interface{}); ok {
+		return len(mp), nil
+	}
+	return 0, nil
+}
+
+// GetRepeatedField returns the value for the given repeated field descriptor at
+// the given index. It panics if an error is encountered. See
+// TryGetRepeatedField.
+func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} {
+	if v, err := m.TryGetRepeatedField(fd, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedField returns the value for the given repeated field descriptor
+// at the given index. An error is returned if the given field descriptor does
+// not belong to the right message type, if it is not a repeated field, or if
+// the given index is out of range (less than zero or greater than or equal to
+// the length of the repeated field). Also, even though map fields technically
+// are repeated fields, if the given field is a map field an error will result:
+// map representation does not lend itself to random access by index.
+//
+// The Go type of the value returned mirrors the type that protoc would generate
+// for the field's element type. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The value at the given index in the parsed value
+// will be returned. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. It panics if an error is encountered. See
+// TryGetRepeatedFieldByName.
+func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. An error is returned if the given name is
+// unknown, if it names a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater
+// than or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. It panics if an error is encountered.
+// See TryGetRepeatedFieldByNumber.
+func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. An error is returned if the given tag is
+// unknown, if it indicates a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater than
+// or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if fd.IsMap() || !fd.IsRepeated() {
+		return nil, FieldIsNotRepeatedError
+	}
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		var err error
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if sl == nil {
+			return nil, IndexOutOfRangeError
+		}
+	}
+	res := sl.([]interface{})
+	if index >= len(res) {
+		return nil, IndexOutOfRangeError
+	}
+	return res[index], nil
+}
+
+// AddRepeatedField appends the given value to the given repeated field. It
+// panics if an error is encountered. See TryAddRepeatedField.
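+//
+// A hedged sketch (the repeated string field "tags" is hypothetical):
+//
+//	m.AddRepeatedFieldByName("tags", "alpha")
+//	m.AddRepeatedFieldByName("tags", "beta")
+//	// m.FieldLengthByName("tags") now reports 2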
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TryAddRepeatedField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedField appends the given value to the given repeated field. An
+// error is returned if the given field descriptor does not belong to the right
+// message type, if the given field is not repeated, or if the given value is
+// not a correct/compatible type for the given field. If the given field is a
+// map field, the call will succeed if the given value is an instance of the
+// map's entry message type.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the given value is appended to
+// it.
+func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByName appends the given value to the repeated field with the
+// given name. It panics if an error is encountered. See
+// TryAddRepeatedFieldByName.
+func (m *Message) AddRepeatedFieldByName(name string, val interface{}) {
+	if err := m.TryAddRepeatedFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByName appends the given value to the repeated field with
+// the given name. An error is returned if the given name is unknown, if it
+// names a field that is not repeated, or if the given value has an incorrect
+// type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByNumber appends the given value to the repeated field with
+// the given tag number. It panics if an error is encountered. See
+// TryAddRepeatedFieldByNumber.
+func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByNumber appends the given value to the repeated field
+// with the given tag number. An error is returned if the given tag is unknown,
+// if it indicates a field that is not repeated, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if !fd.IsRepeated() {
+		return FieldIsNotRepeatedError
+	}
+	val, err := validElementFieldValue(fd, val, false)
+	if err != nil {
+		return err
+	}
+
+	if fd.IsMap() {
+		// We're lenient. Just as we allow setting a map field to a slice of
+		// entry messages, we also allow adding entries one at a time (as if
+		// the field were a normal repeated field).
+		msg := val.(proto.Message)
+		dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf)
+		if err != nil {
+			return err
+		}
+		k, err := dm.TryGetFieldByNumber(1)
+		if err != nil {
+			return err
+		}
+		v, err := dm.TryGetFieldByNumber(2)
+		if err != nil {
+			return err
+		}
+		return m.putMapField(fd, k, v)
+	}
+
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if sl == nil {
+			sl = []interface{}{}
+		}
+	}
+	res := sl.([]interface{})
+	res = append(res, val)
+	m.internalSetField(fd, res)
+	return nil
+}
+
+// SetRepeatedField sets the value for the given repeated field descriptor and
+// given index to the given value. It panics if an error is encountered. See
+// TrySetRepeatedField.
+func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) {
+	if err := m.TrySetRepeatedField(fd, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedField sets the value for the given repeated field descriptor
+// and given index to the given value. An error is returned if the given field
+// descriptor does not belong to the right message type, if the given field is
+// not repeated, or if the given value is not a correct/compatible type for the
+// given field. Also, even though map fields technically are repeated fields, if
+// the given field is a map field an error will result: map representation does
+// not lend itself to random access by index.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the element at the given index
+// is replaced with the given value.
+func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByName sets the value for the repeated field with the given
+// name and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByName.
+func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) {
+	if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedFieldByName sets the value for the repeated field with the
+// given name and the given index to the given value. An error is returned if
+// the given name is unknown, if it names a field that is not repeated (or is a
+// map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByNumber sets the value for the repeated field with the given
+// tag number and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByNumber.
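+//
+// A hedged sketch (tag 4 and its repeated string type are hypothetical):
+//
+//	if err := m.TrySetRepeatedFieldByNumber(4, 0, "first"); err != nil {
+//		// e.g. IndexOutOfRangeError when the field is still empty
+//	}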
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) { + if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil { + panic(err.Error()) + } +} + +// TrySetRepeatedFieldByNumber sets the value for the repeated field with the +// given tag number and the given index to the given value. An error is returned +// if the given tag is unknown, if it indicates a field that is not repeated (or +// is a map field), or if the given value has an incorrect type. +// +// (See TrySetField for more info on types.) +func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error { + if index < 0 { + return IndexOutOfRangeError + } + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + return m.setRepeatedField(fd, index, val) +} + +func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error { + if fd.IsMap() || !fd.IsRepeated() { + return FieldIsNotRepeatedError + } + val, err := validElementFieldValue(fd, val, false) + if err != nil { + return err + } + sl := m.values[fd.GetNumber()] + if sl == nil { + if sl, err = m.parseUnknownField(fd); err != nil { + return err + } else if sl == nil { + return IndexOutOfRangeError + } + } + res := sl.([]interface{}) + if index >= len(res) { + return IndexOutOfRangeError + } + res[index] = val + return nil +} + +// GetUnknownField gets the value(s) for the given unknown tag number. If this +// message has no unknown fields with the given tag, nil is returned. +func (m *Message) GetUnknownField(tagNumber int32) []UnknownField { + if u, ok := m.unknownFields[tagNumber]; ok { + return u + } else { + return nil + } +} + +func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) { + unks, ok := m.unknownFields[fd.GetNumber()] + if !ok { + return nil, nil + } + var v interface{} + var sl []interface{} + var mp map[interface{}]interface{} + if fd.IsMap() { + mp = map[interface{}]interface{}{} + } + var err error + for _, unk := range unks { + var val interface{} + if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup { + val, err = codec.DecodeLengthDelimitedField(fd, unk.Contents, m.mf) + } else { + val, err = codec.DecodeScalarField(fd, unk.Value) + } + if err != nil { + return nil, err + } + if fd.IsMap() { + newEntry := val.(*Message) + kk, err := newEntry.TryGetFieldByNumber(1) + if err != nil { + return nil, err + } + vv, err := newEntry.TryGetFieldByNumber(2) + if err != nil { + return nil, err + } + mp[kk] = vv + v = mp + } else if fd.IsRepeated() { + t := reflect.TypeOf(val) + if t.Kind() == reflect.Slice && t != typeOfBytes { + // append slices if we unmarshalled a packed repeated field + newVals := val.([]interface{}) + sl = append(sl, newVals...) 
+ } else { + sl = append(sl, val) + } + v = sl + } else { + v = val + } + } + m.internalSetField(fd, v) + return v, nil +} + +func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) { + return validFieldValueForRv(fd, reflect.ValueOf(val)) +} + +func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) { + if fd.IsMap() && val.Kind() == reflect.Map { + return validFieldValueForMapField(fd, val) + } + + if fd.IsRepeated() { // this will also catch map fields where given value was not a map + if val.Kind() != reflect.Array && val.Kind() != reflect.Slice { + if fd.IsMap() { + return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type()) + } else { + return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type()) + } + } + + if fd.IsMap() { + // value should be a slice of entry messages that we need convert into a map[interface{}]interface{} + m := map[interface{}]interface{}{} + for i := 0; i < val.Len(); i++ { + e, err := validElementFieldValue(fd, val.Index(i).Interface(), false) + if err != nil { + return nil, err + } + msg := e.(proto.Message) + dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil) + if err != nil { + return nil, err + } + k, err := dm.TryGetFieldByNumber(1) + if err != nil { + return nil, err + } + v, err := dm.TryGetFieldByNumber(2) + if err != nil { + return nil, err + } + m[k] = v + } + return m, nil + } + + // make a defensive copy while checking contents (also converts to []interface{}) + s := make([]interface{}, val.Len()) + for i := 0; i < val.Len(); i++ { + ev := val.Index(i) + if ev.Kind() == reflect.Interface { + // unwrap it + ev = reflect.ValueOf(ev.Interface()) + } + e, err := validElementFieldValueForRv(fd, ev, false) + if err != nil { + return nil, err + } + s[i] = e + } + + return s, nil + } + + return validElementFieldValueForRv(fd, val, false) +} + +func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) { + if dm, ok := m.(*Message); ok { + return dm, nil + } + dm := NewMessageWithMessageFactory(md, mf) + if err := dm.mergeFrom(m); err != nil { + return nil, err + } + return dm, nil +} + +func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}, allowNilMessage bool) (interface{}, error) { + return validElementFieldValueForRv(fd, reflect.ValueOf(val), allowNilMessage) +} + +func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value, allowNilMessage bool) (interface{}, error) { + t := fd.GetType() + if !val.IsValid() { + return nil, typeError(fd, nil) + } + + switch t { + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_ENUM: + return toInt32(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64: + return toInt64(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + descriptorpb.FieldDescriptorProto_TYPE_UINT32: + return toUint32(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + descriptorpb.FieldDescriptorProto_TYPE_UINT64: + return toUint64(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + return toFloat32(reflect.Indirect(val), fd) + + case 
descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return toFloat64(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + return toBool(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return toBytes(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return toString(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_GROUP: + m, err := asMessage(val, fd.GetFullyQualifiedName()) + // check that message is correct type + if err != nil { + return nil, err + } + var msgType string + if dm, ok := m.(*Message); ok { + if allowNilMessage && dm == nil { + // if dm == nil, we'll panic below, so early out if that is allowed + // (only allowed for map values, to indicate an entry w/ no value) + return m, nil + } + msgType = dm.GetMessageDescriptor().GetFullyQualifiedName() + } else { + msgType = proto.MessageName(m) + } + if msgType != fd.GetMessageType().GetFullyQualifiedName() { + return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType) + } + return m, nil + + default: + return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType()) + } +} + +func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) { + if v.Kind() == reflect.Int32 { + return int32(v.Int()), nil + } + return 0, typeError(fd, v.Type()) +} + +func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) { + if v.Kind() == reflect.Uint32 { + return uint32(v.Uint()), nil + } + return 0, typeError(fd, v.Type()) +} + +func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) { + if v.Kind() == reflect.Float32 { + return float32(v.Float()), nil + } + return 0, typeError(fd, v.Type()) +} + +func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) { + if v.Kind() == reflect.Int64 || v.Kind() == reflect.Int || v.Kind() == reflect.Int32 { + return v.Int(), nil + } + return 0, typeError(fd, v.Type()) +} + +func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) { + if v.Kind() == reflect.Uint64 || v.Kind() == reflect.Uint || v.Kind() == reflect.Uint32 { + return v.Uint(), nil + } + return 0, typeError(fd, v.Type()) +} + +func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) { + if v.Kind() == reflect.Float64 || v.Kind() == reflect.Float32 { + return v.Float(), nil + } + return 0, typeError(fd, v.Type()) +} + +func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) { + if v.Kind() == reflect.Bool { + return v.Bool(), nil + } + return false, typeError(fd, v.Type()) +} + +func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) { + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + return v.Bytes(), nil + } + return nil, typeError(fd, v.Type()) +} + +func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) { + if v.Kind() == reflect.String { + return v.String(), nil + } + return "", typeError(fd, v.Type()) +} + +func typeError(fd *desc.FieldDescriptor, t reflect.Type) error { + return fmt.Errorf( + "%s field %s is not compatible with value of type %v", + getTypeString(fd), fd.GetFullyQualifiedName(), t) +} + +func getTypeString(fd *desc.FieldDescriptor) string { + return strings.ToLower(fd.GetType().String()) +} + +func asMessage(v reflect.Value, fieldName string) (proto.Message, error) { + t := 
v.Type()
+	// we need a pointer to a struct that implements proto.Message
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) {
+		return nil, fmt.Errorf("message field %s is not compatible with value of type %v", fieldName, v.Type())
+	}
+	return v.Interface().(proto.Message), nil
+}
+
+// Reset resets this message to an empty message. It removes all values set in
+// the message.
+func (m *Message) Reset() {
+	for k := range m.values {
+		delete(m.values, k)
+	}
+	for k := range m.unknownFields {
+		delete(m.unknownFields, k)
+	}
+}
+
+// String returns this message rendered in compact text format.
+func (m *Message) String() string {
+	b, err := m.MarshalText()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error()))
+	}
+	return string(b)
+}
+
+// ProtoMessage is present to satisfy the proto.Message interface.
+func (m *Message) ProtoMessage() {
+}
+
+// ConvertTo converts this dynamic message into the given message. This is
+// shorthand for resetting then merging:
+//
+//	target.Reset()
+//	m.MergeInto(target)
+func (m *Message) ConvertTo(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	target.Reset()
+	return m.mergeInto(target, defaultDeterminism)
+}
+
+// ConvertToDeterministic converts this dynamic message into the given message.
+// It is just like ConvertTo, but it attempts to produce deterministic results.
+// That means that if the target is a generated message (not another dynamic
+// message) and the current runtime is unaware of any fields or extensions that
+// are present in m, they will be serialized into the target's unrecognized
+// fields deterministically.
+func (m *Message) ConvertToDeterministic(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	target.Reset()
+	return m.mergeInto(target, true)
+}
+
+// ConvertFrom converts the given message into this dynamic message. This is
+// shorthand for resetting then merging:
+//
+//	m.Reset()
+//	m.MergeFrom(target)
+func (m *Message) ConvertFrom(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	m.Reset()
+	return m.mergeFrom(target)
+}
+
+// MergeInto merges this dynamic message into the given message. All field
+// values in this message will be set on the given message. For map fields,
+// entries are added to the given message (if the given message has existing
+// values for like keys, they are overwritten). For slice fields, elements are
+// added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in this message to be represented as unknown fields in the
+// given message after merging, and vice versa.
+func (m *Message) MergeInto(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+	return m.mergeInto(target, defaultDeterminism)
+}
+
+// MergeIntoDeterministic merges this dynamic message into the given message.
+// It is just like MergeInto, but it attempts to produce deterministic results.
+// That means that if the target is a generated message (not another dynamic
+// message) and the current runtime is unaware of any fields or extensions that
+// are present in m, they will be serialized into the target's unrecognized
+// fields deterministically.
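+//
+// A minimal usage sketch (md and the generated type pb.Example are
+// illustrative, not part of this package):
+//
+//	dm := dynamic.NewMessage(md) // md is the descriptor for pb.Example
+//	dm.SetFieldByName("name", "demo")
+//	var target pb.Example
+//	if err := dm.MergeIntoDeterministic(&target); err != nil {
+//		// the message types are not compatible
+//	}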
+func (m *Message) MergeIntoDeterministic(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+	return m.mergeInto(target, true)
+}
+
+// MergeFrom merges the given message into this dynamic message. All field
+// values in the given message will be set on this message. For map fields,
+// entries are added to this message (if this message has existing values for
+// like keys, they are overwritten). For slice fields, elements are added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in that message to be represented as unknown fields in this
+// message after merging, and vice versa.
+func (m *Message) MergeFrom(source proto.Message) error {
+	if err := m.checkType(source); err != nil {
+		return err
+	}
+	return m.mergeFrom(source)
+}
+
+// Merge implements the proto.Merger interface so that dynamic messages are
+// compatible with the proto.Merge function. It delegates to MergeFrom but will
+// panic on error as the proto.Merger interface doesn't allow for returning an
+// error.
+//
+// Unlike nearly all other methods, this method can work if this message's type
+// is not defined (such as instantiating the message without using NewMessage).
+// This is strictly so that dynamic messages are compatible with the
+// proto.Clone function, which instantiates a new message via reflection (thus
+// its message descriptor will not be set) and then calls Merge.
+func (m *Message) Merge(source proto.Message) {
+	if m.md == nil {
+		// To support proto.Clone, initialize the descriptor from the source.
+		if dm, ok := source.(*Message); ok {
+			m.md = dm.md
+			// also make sure the clone uses the same message factory and
+			// extensions and also knows about the same extra fields (if any)
+			m.mf = dm.mf
+			m.er = dm.er
+			m.extraFields = dm.extraFields
+		} else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil {
+			panic(err.Error())
+		} else {
+			m.md = md
+		}
+	}
+
+	if err := m.MergeFrom(source); err != nil {
+		panic(err.Error())
+	}
+}
+
+func (m *Message) checkType(target proto.Message) error {
+	if dm, ok := target.(*Message); ok {
+		if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+			return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+		}
+		return nil
+	}
+
+	msgName := proto.MessageName(target)
+	if msgName != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName())
+	}
+	return nil
+}
+
+func (m *Message) mergeInto(pm proto.Message, deterministic bool) error {
+	if dm, ok := pm.(*Message); ok {
+		return dm.mergeFrom(m)
+	}
+
+	target := reflect.ValueOf(pm)
+	if target.Kind() == reflect.Ptr {
+		target = target.Elem()
+	}
+
+	// track tags for which the dynamic message has data but the given
+	// message doesn't know about it
+	unknownTags := map[int32]struct{}{}
+	for tag := range m.values {
+		unknownTags[tag] = struct{}{}
+	}
+
+	// check that we can successfully do the merge
+	structProps := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	for _, prop := range structProps.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+ } + tag := int32(prop.Tag) + v, ok := m.values[tag] + if !ok { + continue + } + if unknownTags != nil { + delete(unknownTags, tag) + } + f := target.FieldByName(prop.Name) + ft := f.Type() + val := reflect.ValueOf(v) + if !canConvert(val, ft) { + return fmt.Errorf("cannot convert %v to %v", val.Type(), ft) + } + } + // check one-of fields + for _, oop := range structProps.OneofTypes { + prop := oop.Prop + tag := int32(prop.Tag) + v, ok := m.values[tag] + if !ok { + continue + } + if unknownTags != nil { + delete(unknownTags, tag) + } + stf, ok := oop.Type.Elem().FieldByName(prop.Name) + if !ok { + return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem()) + } + ft := stf.Type + val := reflect.ValueOf(v) + if !canConvert(val, ft) { + return fmt.Errorf("cannot convert %v to %v", val.Type(), ft) + } + } + // and check extensions, too + for tag, ext := range proto.RegisteredExtensions(pm) { + v, ok := m.values[tag] + if !ok { + continue + } + if unknownTags != nil { + delete(unknownTags, tag) + } + ft := reflect.TypeOf(ext.ExtensionType) + val := reflect.ValueOf(v) + if !canConvert(val, ft) { + return fmt.Errorf("cannot convert %v to %v", val.Type(), ft) + } + } + + // now actually perform the merge + for _, prop := range structProps.Prop { + v, ok := m.values[int32(prop.Tag)] + if !ok { + continue + } + f := target.FieldByName(prop.Name) + if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil { + return err + } + } + // merge one-ofs + for _, oop := range structProps.OneofTypes { + prop := oop.Prop + tag := int32(prop.Tag) + v, ok := m.values[tag] + if !ok { + continue + } + oov := reflect.New(oop.Type.Elem()) + f := oov.Elem().FieldByName(prop.Name) + if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil { + return err + } + target.Field(oop.Field).Set(oov) + } + // merge extensions, too + for tag, ext := range proto.RegisteredExtensions(pm) { + v, ok := m.values[tag] + if !ok { + continue + } + e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem() + if err := mergeVal(reflect.ValueOf(v), e, deterministic); err != nil { + return err + } + if err := proto.SetExtension(pm, ext, e.Interface()); err != nil { + // shouldn't happen since we already checked that the extension type was compatible above + return err + } + } + + // if we have fields that the given message doesn't know about, add to its unknown fields + if len(unknownTags) > 0 { + var b codec.Buffer + b.SetDeterministic(deterministic) + if deterministic { + // if we need to emit things deterministically, sort the + // extensions by their tag number + sortedUnknownTags := make([]int32, 0, len(unknownTags)) + for tag := range unknownTags { + sortedUnknownTags = append(sortedUnknownTags, tag) + } + sort.Slice(sortedUnknownTags, func(i, j int) bool { + return sortedUnknownTags[i] < sortedUnknownTags[j] + }) + for _, tag := range sortedUnknownTags { + fd := m.FindFieldDescriptor(tag) + if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil { + return err + } + } + } else { + for tag := range unknownTags { + fd := m.FindFieldDescriptor(tag) + if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil { + return err + } + } + } + + internal.SetUnrecognized(pm, b.Bytes()) + } + + // finally, convey unknown fields into the given message by letting it unmarshal them + // (this will append to its unknown fields if not known; if somehow the given message recognizes + // a field even though the dynamic message did not, it will get correctly 
unmarshalled) + if unknownTags != nil && len(m.unknownFields) > 0 { + var b codec.Buffer + _ = m.marshalUnknownFields(&b) + _ = proto.UnmarshalMerge(b.Bytes(), pm) + } + + return nil +} + +func canConvert(src reflect.Value, target reflect.Type) bool { + if src.Kind() == reflect.Interface { + src = reflect.ValueOf(src.Interface()) + } + srcType := src.Type() + // we allow convertible types instead of requiring exact types so that calling + // code can, for example, assign an enum constant to an enum field. In that case, + // one type is the enum type (a sub-type of int32) and the other may be the int32 + // type. So we automatically do the conversion in that case. + if srcType.ConvertibleTo(target) { + return true + } else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) { + return true + } else if target.Kind() == reflect.Slice { + if srcType.Kind() != reflect.Slice { + return false + } + et := target.Elem() + for i := 0; i < src.Len(); i++ { + if !canConvert(src.Index(i), et) { + return false + } + } + return true + } else if target.Kind() == reflect.Map { + if srcType.Kind() != reflect.Map { + return false + } + return canConvertMap(src, target) + } else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) { + z := reflect.Zero(target).Interface() + msgType := proto.MessageName(z.(proto.Message)) + return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName() + } else { + return false + } +} + +func mergeVal(src, target reflect.Value, deterministic bool) error { + if src.Kind() == reflect.Interface && !src.IsNil() { + src = src.Elem() + } + srcType := src.Type() + targetType := target.Type() + if srcType.ConvertibleTo(targetType) { + if targetType.Implements(typeOfProtoMessage) && !target.IsNil() { + Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message)) + } else { + target.Set(src.Convert(targetType)) + } + } else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) { + if !src.CanAddr() { + target.Set(reflect.New(targetType.Elem())) + target.Elem().Set(src.Convert(targetType.Elem())) + } else { + target.Set(src.Addr().Convert(targetType)) + } + } else if targetType.Kind() == reflect.Slice { + l := target.Len() + newL := l + src.Len() + if target.Cap() < newL { + // expand capacity of the slice and copy + newSl := reflect.MakeSlice(targetType, newL, newL) + for i := 0; i < target.Len(); i++ { + newSl.Index(i).Set(target.Index(i)) + } + target.Set(newSl) + } else { + target.SetLen(newL) + } + for i := 0; i < src.Len(); i++ { + dest := target.Index(l + i) + if dest.Kind() == reflect.Ptr { + dest.Set(reflect.New(dest.Type().Elem())) + } + if err := mergeVal(src.Index(i), dest, deterministic); err != nil { + return err + } + } + } else if targetType.Kind() == reflect.Map { + return mergeMapVal(src, target, targetType, deterministic) + } else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) { + dm := src.Interface().(*Message) + if target.IsNil() { + target.Set(reflect.New(targetType.Elem())) + } + m := target.Interface().(proto.Message) + if err := dm.mergeInto(m, deterministic); err != nil { + return err + } + } else { + return fmt.Errorf("cannot convert %v to %v", srcType, targetType) + } + return nil +} + +func (m *Message) mergeFrom(pm proto.Message) error { + if dm, ok := pm.(*Message); ok { + // if given message is also a dynamic message, we merge differently + for tag, v := range dm.values { + fd := 
m.FindFieldDescriptor(tag) + if fd == nil { + fd = dm.FindFieldDescriptor(tag) + } + if err := mergeField(m, fd, v); err != nil { + return err + } + } + return nil + } + + pmrv := reflect.ValueOf(pm) + if pmrv.IsNil() { + // nil is an empty message, so nothing to do + return nil + } + + // check that we can successfully do the merge + src := pmrv.Elem() + values := map[*desc.FieldDescriptor]interface{}{} + props := proto.GetProperties(reflect.TypeOf(pm).Elem()) + if props == nil { + return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem()) + } + + // regular fields + for _, prop := range props.Prop { + if prop.Tag == 0 { + continue // one-of or special field (such as XXX_unrecognized, etc.) + } + fd := m.FindFieldDescriptor(int32(prop.Tag)) + if fd == nil { + // Our descriptor has different fields than this message object. So + // try to reflect on the message object's fields. + md, err := desc.LoadMessageDescriptorForMessage(pm) + if err != nil { + return err + } + fd = md.FindFieldByNumber(int32(prop.Tag)) + if fd == nil { + return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name) + } + } + rv := src.FieldByName(prop.Name) + if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() { + continue + } + if v, err := validFieldValueForRv(fd, rv); err != nil { + return err + } else { + values[fd] = v + } + } + + // one-of fields + for _, oop := range props.OneofTypes { + oov := src.Field(oop.Field).Elem() + if !oov.IsValid() || oov.Type() != oop.Type { + // this field is unset (in other words, one-of message field is not currently set to this option) + continue + } + prop := oop.Prop + rv := oov.Elem().FieldByName(prop.Name) + fd := m.FindFieldDescriptor(int32(prop.Tag)) + if fd == nil { + // Our descriptor has different fields than this message object. So + // try to reflect on the message object's fields. + md, err := desc.LoadMessageDescriptorForMessage(pm) + if err != nil { + return err + } + fd = md.FindFieldByNumber(int32(prop.Tag)) + if fd == nil { + return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name) + } + } + if v, err := validFieldValueForRv(fd, rv); err != nil { + return err + } else { + values[fd] = v + } + } + + // extension fields + rexts, _ := proto.ExtensionDescs(pm) + for _, ed := range rexts { + v, _ := proto.GetExtension(pm, ed) + if v == nil { + continue + } + if ed.ExtensionType == nil { + // unrecognized extension: we'll handle that below when we + // handle other unrecognized fields + continue + } + fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field) + if fd == nil { + var err error + if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil { + return err + } + } + if v, err := validFieldValue(fd, v); err != nil { + return err + } else { + values[fd] = v + } + } + + // With API v2, it is possible that the new protoreflect interfaces + // were used to store an extension, which means it can't be returned + // by proto.ExtensionDescs and it's also not in the unrecognized data. + // So we have a separate loop to trawl through it... + var err error + proto.MessageReflect(pm).Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { + if !fld.IsExtension() { + // normal field... 
we already got it above
+			return true
+		}
+		xt := fld.(protoreflect.ExtensionTypeDescriptor)
+		if _, ok := xt.Type().(*proto.ExtensionDesc); ok {
+			// known extension... we already got it above
+			return true
+		}
+		var fd *desc.FieldDescriptor
+		fd, err = desc.WrapField(fld)
+		if err != nil {
+			return false
+		}
+		v := convertProtoReflectValue(val)
+		if v, err = validFieldValue(fd, v); err != nil {
+			return false
+		}
+		values[fd] = v
+		return true
+	})
+	if err != nil {
+		return err
+	}
+
+	// unrecognized extension fields:
+	// In API v2 of proto, some extensions may NEITHER be included in ExtensionDescs
+	// above NOR included in unrecognized fields below. These are extensions that use
+	// a custom extension type (not a generated one -- i.e. not a linked in extension).
+	mr := proto.MessageReflect(pm)
+	var extBytes []byte
+	var retErr error
+	mr.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		if !fld.IsExtension() {
+			// normal field, already processed above
+			return true
+		}
+		if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok {
+			if _, ok := extd.Type().(*proto.ExtensionDesc); ok {
+				// normal known extension, already processed above
+				return true
+			}
+		}
+
+		// marshal the extension to bytes and then handle as unknown field below
+		mr.New()
+		mr.Set(fld, val)
+		extBytes, retErr = protov2.MarshalOptions{}.MarshalAppend(extBytes, mr.Interface())
+		return retErr == nil
+	})
+	if retErr != nil {
+		return retErr
+	}
+
+	// now actually perform the merge
+	for fd, v := range values {
+		if err := mergeField(m, fd, v); err != nil {
+			return err
+		}
+	}
+
+	if len(extBytes) > 0 {
+		// ignore any error returned: pulling in unrecognized extensions,
+		// like unknown fields, is best-effort
+		_ = m.UnmarshalMerge(extBytes)
+	}
+
+	data := internal.GetUnrecognized(pm)
+	if len(data) > 0 {
+		// ignore any error returned: pulling in unknown fields is best-effort
+		_ = m.UnmarshalMerge(data)
+	}
+
+	return nil
+}
+
+func convertProtoReflectValue(v protoreflect.Value) interface{} {
+	val := v.Interface()
+	switch val := val.(type) {
+	case protoreflect.Message:
+		return val.Interface()
+	case protoreflect.Map:
+		mp := make(map[interface{}]interface{}, val.Len())
+		val.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+			mp[convertProtoReflectValue(k.Value())] = convertProtoReflectValue(v)
+			return true
+		})
+		return mp
+	case protoreflect.List:
+		sl := make([]interface{}, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			sl[i] = convertProtoReflectValue(val.Get(i))
+		}
+		return sl
+	case protoreflect.EnumNumber:
+		return int32(val)
+	default:
+		return val
+	}
+}
+
+// Validate checks that all required fields are present. It returns an error if any are absent.
+func (m *Message) Validate() error {
+	missingFields := m.findMissingFields()
+	if len(missingFields) == 0 {
+		return nil
+	}
+	return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+}
+
+func (m *Message) findMissingFields() []string {
+	if m.md.IsProto3() {
+		// proto3 does not allow required fields
+		return nil
+	}
+	var missingFields []string
+	for _, fd := range m.md.GetFields() {
+		if fd.IsRequired() {
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				missingFields = append(missingFields, fd.GetName())
+			}
+		}
+	}
+	return missingFields
+}
+
+// ValidateRecursive checks that all required fields are present and also
+// recursively validates all fields that are also messages. It returns an error
+// if any required fields, in this message or nested within, are absent.
+func (m *Message) ValidateRecursive() error {
+	return m.validateRecursive("")
+}
+
+func (m *Message) validateRecursive(prefix string) error {
+	if missingFields := m.findMissingFields(); len(missingFields) > 0 {
+		for i := range missingFields {
+			missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i])
+		}
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+	}
+
+	for tag, fld := range m.values {
+		fd := m.FindFieldDescriptor(tag)
+		var chprefix string
+		var md *desc.MessageDescriptor
+		checkMsg := func(pm proto.Message) error {
+			var dm *Message
+			if d, ok := pm.(*Message); ok {
+				dm = d
+			} else if pm != nil {
+				dm = m.mf.NewDynamicMessage(md)
+				if err := dm.ConvertFrom(pm); err != nil {
+					return nil
+				}
+			}
+			if dm == nil {
+				return nil
+			}
+			if err := dm.validateRecursive(chprefix); err != nil {
+				return err
+			}
+			return nil
+		}
+		isMap := fd.IsMap()
+		if isMap && fd.GetMapValueType().GetMessageType() != nil {
+			md = fd.GetMapValueType().GetMessageType()
+			mp := fld.(map[interface{}]interface{})
+			for k, v := range mp {
+				chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k)
+				if err := checkMsg(v.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		} else if !isMap && fd.GetMessageType() != nil {
+			md = fd.GetMessageType()
+			if fd.IsRepeated() {
+				sl := fld.([]interface{})
+				for i, v := range sl {
+					chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i)
+					if err := checkMsg(v.(proto.Message)); err != nil {
+						return err
+					}
+				}
+			} else {
+				chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd))
+				if err := checkMsg(fld.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func getName(fd *desc.FieldDescriptor) string {
+	if fd.IsExtension() {
+		return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName())
+	} else {
+		return fd.GetName()
+	}
+}
+
+// knownFieldTags returns tags of present and recognized fields, in sorted order.
+func (m *Message) knownFieldTags() []int {
+	if len(m.values) == 0 {
+		return []int(nil)
+	}
+
+	keys := make([]int, len(m.values))
+	i := 0
+	for k := range m.values {
+		keys[i] = int(k)
+		i++
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// allKnownFieldTags returns tags of present and recognized fields, including
+// those that are unset, in sorted order. This only includes extensions that are
+// present. Known but not-present extensions are not included in the returned
+// set of tags.
+func (m *Message) allKnownFieldTags() []int {
+	fds := m.md.GetFields()
+	keys := make([]int, 0, len(fds)+len(m.extraFields))
+
+	for k := range m.values {
+		keys = append(keys, int(k))
+	}
+
+	// also include known fields that are not present
+	for _, fd := range fds {
+		if _, ok := m.values[fd.GetNumber()]; !ok {
+			keys = append(keys, int(fd.GetNumber()))
+		}
+	}
+	for _, fd := range m.extraFields {
+		if !fd.IsExtension() { // skip extensions that are not present
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				keys = append(keys, int(fd.GetNumber()))
+			}
+		}
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// unknownFieldTags returns tags of present but unrecognized fields, in sorted order.
+func (m *Message) unknownFieldTags() []int { + if len(m.unknownFields) == 0 { + return []int(nil) + } + keys := make([]int, len(m.unknownFields)) + i := 0 + for k := range m.unknownFields { + keys[i] = int(k) + i++ + } + sort.Ints(keys) + return keys +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go new file mode 100644 index 00000000..e44c6c53 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go @@ -0,0 +1,157 @@ +package dynamic + +import ( + "bytes" + "reflect" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/desc" +) + +// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they +// have the same message type and same fields set to equal values. For proto3 messages, fields set +// to their zero value are considered unset. +func Equal(a, b *Message) bool { + if a == b { + return true + } + if (a == nil) != (b == nil) { + return false + } + if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() { + return false + } + if len(a.values) != len(b.values) { + return false + } + if len(a.unknownFields) != len(b.unknownFields) { + return false + } + for tag, aval := range a.values { + bval, ok := b.values[tag] + if !ok { + return false + } + if !fieldsEqual(aval, bval) { + return false + } + } + for tag, au := range a.unknownFields { + bu, ok := b.unknownFields[tag] + if !ok { + return false + } + if len(au) != len(bu) { + return false + } + for i, aval := range au { + bval := bu[i] + if aval.Encoding != bval.Encoding { + return false + } + if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup { + if !bytes.Equal(aval.Contents, bval.Contents) { + return false + } + } else if aval.Value != bval.Value { + return false + } + } + } + // all checks pass! + return true +} + +func fieldsEqual(aval, bval interface{}) bool { + arv := reflect.ValueOf(aval) + brv := reflect.ValueOf(bval) + if arv.Type() != brv.Type() { + // it is possible that one is a dynamic message and one is not + apm, ok := aval.(proto.Message) + if !ok { + return false + } + bpm, ok := bval.(proto.Message) + if !ok { + return false + } + return MessagesEqual(apm, bpm) + + } else { + switch arv.Kind() { + case reflect.Ptr: + apm, ok := aval.(proto.Message) + if !ok { + // Don't know how to compare pointer values that aren't messages! + // Maybe this should panic? + return false + } + bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type + return MessagesEqual(apm, bpm) + + case reflect.Map: + return mapsEqual(arv, brv) + + case reflect.Slice: + if arv.Type() == typeOfBytes { + return bytes.Equal(aval.([]byte), bval.([]byte)) + } else { + return slicesEqual(arv, brv) + } + + default: + return aval == bval + } + } +} + +func slicesEqual(a, b reflect.Value) bool { + if a.Len() != b.Len() { + return false + } + for i := 0; i < a.Len(); i++ { + ai := a.Index(i) + bi := b.Index(i) + if !fieldsEqual(ai.Interface(), bi.Interface()) { + return false + } + } + return true +} + +// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal +// when one or both of the messages might be a dynamic message. 
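+//
+// A small sketch (pb.Example is an illustrative generated type and md its
+// descriptor, neither part of this package):
+//
+//	dm := dynamic.NewMessage(md)
+//	_ = dm.ConvertFrom(&pb.Example{Name: "demo"})
+//	same := dynamic.MessagesEqual(dm, &pb.Example{Name: "demo"}) // true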
+func MessagesEqual(a, b proto.Message) bool {
+	da, aok := a.(*Message)
+	db, bok := b.(*Message)
+	// Both dynamic messages
+	if aok && bok {
+		return Equal(da, db)
+	}
+	// Neither dynamic messages
+	if !aok && !bok {
+		return proto.Equal(a, b)
+	}
+	// Mixed
+	if bok {
+		// we want a to be the dynamic one
+		b, da = a, db
+	}
+
+	// Instead of panicking below if we have a nil dynamic message, check
+	// now and return false if the input message is not also nil.
+	if da == nil {
+		return isNil(b)
+	}
+
+	md, err := desc.LoadMessageDescriptorForMessage(b)
+	if err != nil {
+		return false
+	}
+	db = NewMessageWithMessageFactory(md, da.mf)
+	if db.ConvertFrom(b) != nil {
+		return false
+	}
+	return Equal(da, db)
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
new file mode 100644
index 00000000..1d381610
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
@@ -0,0 +1,46 @@
+package dynamic
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+)
+
+// SetExtension sets the given extension value. If the given message is not a
+// dynamic message, the given extension may not be recognized (or may differ
+// from the compiled and linked-in version of the extension). So in that case,
+// this function will serialize the given value to bytes and then use
+// proto.SetRawExtension to set the value.
+func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error {
+	if !extd.IsExtension() {
+		return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName())
+	}
+
+	if dm, ok := msg.(*Message); ok {
+		return dm.TrySetField(extd, val)
+	}
+
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return err
+	}
+	if err := checkField(extd, md); err != nil {
+		return err
+	}
+
+	val, err = validFieldValue(extd, val)
+	if err != nil {
+		return err
+	}
+
+	var b codec.Buffer
+	b.SetDeterministic(defaultDeterminism)
+	if err := b.EncodeFieldValue(extd, val); err != nil {
+		return err
+	}
+	proto.SetRawExtension(msg, extd.GetNumber(), b.Bytes())
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
new file mode 100644
index 00000000..68768278
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
@@ -0,0 +1,241 @@
+package dynamic
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// ExtensionRegistry is a registry of known extension fields. This is used to parse
+// extension fields encountered when de-serializing a dynamic message.
+type ExtensionRegistry struct {
+	includeDefault bool
+	mu             sync.RWMutex
+	exts           map[string]map[int32]*desc.FieldDescriptor
+}
+
+// NewExtensionRegistryWithDefaults returns a registry that includes all "default" extensions,
+// which are those that are statically linked into the current program (e.g. registered by
+// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the
+// registry will override any default extensions that are for the same extendee and have the
+// same tag number and/or name.
+func NewExtensionRegistryWithDefaults() *ExtensionRegistry {
+	return &ExtensionRegistry{includeDefault: true}
+}
+
+// AddExtensionDesc adds the given extensions to the registry.
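+//
+// For example (pb.E_MyExt is an illustrative generated extension
+// descriptor, not part of this package):
+//
+//	er := dynamic.NewExtensionRegistryWithDefaults()
+//	if err := er.AddExtensionDesc(pb.E_MyExt); err != nil {
+//		// the extension's field descriptor could not be loaded
+//	}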
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error {
+	flds := make([]*desc.FieldDescriptor, len(exts))
+	for i, ext := range exts {
+		fd, err := desc.LoadFieldDescriptorForExtension(ext)
+		if err != nil {
+			return err
+		}
+		flds[i] = fd
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, fd := range flds {
+		r.putExtensionLocked(fd)
+	}
+	return nil
+}
+
+// AddExtension adds the given extensions to the registry. The given extensions
+// will overwrite any previously added extensions that are for the same extendee
+// message and same extension tag number.
+func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error {
+	for _, ext := range exts {
+		if !ext.IsExtension() {
+			return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName())
+		}
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range exts {
+		r.putExtensionLocked(ext)
+	}
+	return nil
+}
+
+// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor.
+func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.addExtensionsFromFileLocked(fd, false, nil)
+}
+
+// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the given file
+// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds
+// extensions from the entire transitive closure for the given file.
+func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	already := map[*desc.FileDescriptor]struct{}{}
+	r.addExtensionsFromFileLocked(fd, true, already)
+}
+
+func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) {
+	if _, ok := alreadySeen[fd]; ok {
+		return
+	}
+
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range fd.GetExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range fd.GetMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+
+	if recursive {
+		alreadySeen[fd] = struct{}{}
+		for _, dep := range fd.GetDependencies() {
+			r.addExtensionsFromFileLocked(dep, recursive, alreadySeen)
+		}
+	}
+}
+
+func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) {
+	for _, ext := range md.GetNestedExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range md.GetNestedMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+}
+
+func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) {
+	msgName := fd.GetOwner().GetFullyQualifiedName()
+	m := r.exts[msgName]
+	if m == nil {
+		m = map[int32]*desc.FieldDescriptor{}
+		r.exts[msgName] = m
+	}
+	m[fd.GetNumber()] = fd
+}
+
+// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and tag number. If no extension is known, nil is returned.
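+//
+// For example, assuming the registrations sketched above (the message
+// name and tag number are illustrative):
+//
+//	fd := er.FindExtension("my.pkg.MyMessage", 100)
+//	if fd == nil {
+//		// tag 100 is not a known extension of my.pkg.MyMessage
+//	}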
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor { + if r == nil { + return nil + } + r.mu.RLock() + defer r.mu.RUnlock() + fd := r.exts[messageName][tagNumber] + if fd == nil && r.includeDefault { + ext := getDefaultExtensions(messageName)[tagNumber] + if ext != nil { + fd, _ = desc.LoadFieldDescriptorForExtension(ext) + } + } + return fd +} + +// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified +// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil +// is returned. +func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor { + if r == nil { + return nil + } + r.mu.RLock() + defer r.mu.RUnlock() + for _, fd := range r.exts[messageName] { + if fd.GetFullyQualifiedName() == fieldName { + return fd + } + } + if r.includeDefault { + for _, ext := range getDefaultExtensions(messageName) { + fd, _ := desc.LoadFieldDescriptorForExtension(ext) + if fd.GetFullyQualifiedName() == fieldName { + return fd + } + } + } + return nil +} + +// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified +// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned. +// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last +// component uses the field's JSON name (if present). +func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor { + if r == nil { + return nil + } + r.mu.RLock() + defer r.mu.RUnlock() + for _, fd := range r.exts[messageName] { + if fd.GetFullyQualifiedJSONName() == fieldName { + return fd + } + } + if r.includeDefault { + for _, ext := range getDefaultExtensions(messageName) { + fd, _ := desc.LoadFieldDescriptorForExtension(ext) + if fd.GetFullyQualifiedJSONName() == fieldName { + return fd + } + } + } + return nil +} + +func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc { + t := proto.MessageType(messageName) + if t != nil { + msg := reflect.Zero(t).Interface().(proto.Message) + return proto.RegisteredExtensions(msg) + } + return nil +} + +// AllExtensionsForType returns all known extension fields for the given extendee name (must be a +// fully-qualified message name). 
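+//
+// For example (the message name is illustrative):
+//
+//	for _, fd := range er.AllExtensionsForType("my.pkg.MyMessage") {
+//		fmt.Println(fd.GetNumber(), fd.GetFullyQualifiedName())
+//	}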
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor {
+	if r == nil {
+		return []*desc.FieldDescriptor(nil)
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	flds := r.exts[messageName]
+	var ret []*desc.FieldDescriptor
+	if r.includeDefault {
+		exts := getDefaultExtensions(messageName)
+		if len(exts) > 0 || len(flds) > 0 {
+			ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds))
+		}
+		for tag, ext := range exts {
+			if _, ok := flds[tag]; ok {
+				// skip default extension and use the one explicitly registered instead
+				continue
+			}
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd != nil {
+				ret = append(ret, fd)
+			}
+		}
+	} else if len(flds) > 0 {
+		ret = make([]*desc.FieldDescriptor, 0, len(flds))
+	}
+
+	for _, ext := range flds {
+		ret = append(ret, ext)
+	}
+	return ret
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
new file mode 100644
index 00000000..6fca3937
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -0,0 +1,310 @@
+// Package grpcdynamic provides a dynamic RPC stub. It can be used to invoke RPC
+// methods where only method descriptors are known. The actual request and response
+// messages may be dynamic messages.
+package grpcdynamic
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/dynamic"
+)
+
+// Stub is an RPC client stub, used for dynamically dispatching RPCs to a server.
+type Stub struct {
+	channel Channel
+	mf      *dynamic.MessageFactory
+}
+
+// Channel represents the operations necessary to issue RPCs via gRPC. The
+// *grpc.ClientConn type provides this interface and will typically be the
+// concrete type used to construct Stubs. But the use of this interface allows
+// construction of stubs that use alternate concrete types as the transport for
+// RPC operations.
+type Channel = grpc.ClientConnInterface
+
+// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
+func NewStub(channel Channel) Stub {
+	return NewStubWithMessageFactory(channel, nil)
+}
+
+// NewStubWithMessageFactory creates a new RPC stub that uses the given channel for
+// dispatching RPCs and the given MessageFactory for creating response messages.
+func NewStubWithMessageFactory(channel Channel, mf *dynamic.MessageFactory) Stub {
+	return Stub{channel: channel, mf: mf}
+}
+
+func requestMethod(md *desc.MethodDescriptor) string {
+	return fmt.Sprintf("/%s/%s", md.GetService().GetFullyQualifiedName(), md.GetName())
+}
+
+// InvokeRpc sends a unary RPC and returns the response. Use this for unary methods.
+func (s Stub) InvokeRpc(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (proto.Message, error) {
+	if method.IsClientStreaming() || method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpc is for unary methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	if err := checkMessageType(method.GetInputType(), request); err != nil {
+		return nil, err
+	}
+	resp := s.mf.NewMessage(method.GetOutputType())
+	if err := s.channel.Invoke(ctx, requestMethod(method), request, resp, opts...); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// InvokeRpcServerStream sends a single request and returns the response stream.
Use this for server-streaming methods. +func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (*ServerStream, error) { + if method.IsClientStreaming() || !method.IsServerStreaming() { + return nil, fmt.Errorf("InvokeRpcServerStream is for server-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method)) + } + if err := checkMessageType(method.GetInputType(), request); err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(ctx) + sd := grpc.StreamDesc{ + StreamName: method.GetName(), + ServerStreams: method.IsServerStreaming(), + ClientStreams: method.IsClientStreaming(), + } + if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil { + cancel() + return nil, err + } else { + err = cs.SendMsg(request) + if err != nil { + cancel() + return nil, err + } + err = cs.CloseSend() + if err != nil { + cancel() + return nil, err + } + go func() { + // when the new stream is finished, also cleanup the parent context + <-cs.Context().Done() + cancel() + }() + return &ServerStream{cs, method.GetOutputType(), s.mf}, nil + } +} + +// InvokeRpcClientStream creates a new stream that is used to send request messages and, at the end, +// receive the response message. Use this for client-streaming methods. +func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*ClientStream, error) { + if !method.IsClientStreaming() || method.IsServerStreaming() { + return nil, fmt.Errorf("InvokeRpcClientStream is for client-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method)) + } + ctx, cancel := context.WithCancel(ctx) + sd := grpc.StreamDesc{ + StreamName: method.GetName(), + ServerStreams: method.IsServerStreaming(), + ClientStreams: method.IsClientStreaming(), + } + if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil { + cancel() + return nil, err + } else { + go func() { + // when the new stream is finished, also cleanup the parent context + <-cs.Context().Done() + cancel() + }() + return &ClientStream{cs, method, s.mf, cancel}, nil + } +} + +// InvokeRpcBidiStream creates a new stream that is used to both send request messages and receive response +// messages. Use this for bidi-streaming methods. 
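+//
+// A minimal sketch (conn is an established *grpc.ClientConn and md a
+// bidi-streaming *desc.MethodDescriptor; both, along with ctx and req,
+// are assumed to exist):
+//
+//	stub := grpcdynamic.NewStub(conn)
+//	bs, err := stub.InvokeRpcBidiStream(ctx, md)
+//	if err != nil {
+//		return err
+//	}
+//	_ = bs.SendMsg(req)       // send zero or more request messages,
+//	_ = bs.CloseSend()        // then signal the end of the request stream
+//	resp, err := bs.RecvMsg() // and receive responses until io.EOF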
+func (s Stub) InvokeRpcBidiStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*BidiStream, error) {
+	if !method.IsClientStreaming() || !method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcBidiStream is for bidi-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		return nil, err
+	} else {
+		return &BidiStream{cs, method.GetInputType(), method.GetOutputType(), s.mf}, nil
+	}
+}
+
+func methodType(md *desc.MethodDescriptor) string {
+	if md.IsClientStreaming() && md.IsServerStreaming() {
+		return "bidi-streaming"
+	} else if md.IsClientStreaming() {
+		return "client-streaming"
+	} else if md.IsServerStreaming() {
+		return "server-streaming"
+	} else {
+		return "unary"
+	}
+}
+
+func checkMessageType(md *desc.MessageDescriptor, msg proto.Message) error {
+	var typeName string
+	if dm, ok := msg.(*dynamic.Message); ok {
+		typeName = dm.GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		typeName = proto.MessageName(msg)
+	}
+	if typeName != md.GetFullyQualifiedName() {
+		return fmt.Errorf("expecting message of type %s; got %s", md.GetFullyQualifiedName(), typeName)
+	}
+	return nil
+}
+
+// ServerStream represents a response stream from a server. Messages in the stream can be queried
+// as can header and trailer metadata sent by the server.
+type ServerStream struct {
+	stream   grpc.ClientStream
+	respType *desc.MessageDescriptor
+	mf       *dynamic.MessageFactory
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *ServerStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
+func (s *ServerStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *ServerStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *ServerStream) RecvMsg() (proto.Message, error) {
+	resp := s.mf.NewMessage(s.respType)
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	} else {
+		return resp, nil
+	}
+}
+
+// ClientStream represents a request stream from a client. Messages in the stream can be sent
+// and, when done, the unary server message and header and trailer metadata can be queried.
+type ClientStream struct {
+	stream grpc.ClientStream
+	method *desc.MethodDescriptor
+	mf     *dynamic.MessageFactory
+	cancel context.CancelFunc
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *ClientStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
+func (s *ClientStream) Trailer() metadata.MD { + return s.stream.Trailer() +} + +// Context returns the context associated with this streaming operation. +func (s *ClientStream) Context() context.Context { + return s.stream.Context() +} + +// SendMsg sends a request message to the server. +func (s *ClientStream) SendMsg(m proto.Message) error { + if err := checkMessageType(s.method.GetInputType(), m); err != nil { + return err + } + return s.stream.SendMsg(m) +} + +// CloseAndReceive closes the outgoing request stream and then blocks for the server's response. +func (s *ClientStream) CloseAndReceive() (proto.Message, error) { + if err := s.stream.CloseSend(); err != nil { + return nil, err + } + resp := s.mf.NewMessage(s.method.GetOutputType()) + if err := s.stream.RecvMsg(resp); err != nil { + return nil, err + } + // make sure we get EOF for a second message + if err := s.stream.RecvMsg(resp); err != io.EOF { + if err == nil { + s.cancel() + return nil, fmt.Errorf("client-streaming method %q returned more than one response message", s.method.GetFullyQualifiedName()) + } else { + return nil, err + } + } + return resp, nil +} + +// BidiStream represents a bi-directional stream for sending messages to and receiving +// messages from a server. The header and trailer metadata sent by the server can also be +// queried. +type BidiStream struct { + stream grpc.ClientStream + reqType *desc.MessageDescriptor + respType *desc.MessageDescriptor + mf *dynamic.MessageFactory +} + +// Header returns any header metadata sent by the server (blocks if necessary until headers are +// received). +func (s *BidiStream) Header() (metadata.MD, error) { + return s.stream.Header() +} + +// Trailer returns the trailer metadata sent by the server. It must only be called after +// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream). +func (s *BidiStream) Trailer() metadata.MD { + return s.stream.Trailer() +} + +// Context returns the context associated with this streaming operation. +func (s *BidiStream) Context() context.Context { + return s.stream.Context() +} + +// SendMsg sends a request message to the server. +func (s *BidiStream) SendMsg(m proto.Message) error { + if err := checkMessageType(s.reqType, m); err != nil { + return err + } + return s.stream.SendMsg(m) +} + +// CloseSend indicates the request stream has ended. Invoke this after all request messages +// are sent (even if there are zero such messages). +func (s *BidiStream) CloseSend() error { + return s.stream.CloseSend() +} + +// RecvMsg returns the next message in the response stream or an error. If the stream +// has completed normally, the error is io.EOF. Otherwise, the error indicates the +// nature of the abnormal termination of the stream. 
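+//
+// A typical receive loop (a sketch; bs is an existing *BidiStream):
+//
+//	for {
+//		resp, err := bs.RecvMsg()
+//		if err == io.EOF {
+//			break // stream completed normally
+//		}
+//		if err != nil {
+//			return err // abnormal termination
+//		}
+//		// use resp
+//	}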
+func (s *BidiStream) RecvMsg() (proto.Message, error) { + resp := s.mf.NewMessage(s.respType) + if err := s.stream.RecvMsg(resp); err != nil { + return nil, err + } else { + return resp, nil + } +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go new file mode 100644 index 00000000..bd7fcaa5 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go @@ -0,0 +1,76 @@ +package dynamic + +import "bytes" + +type indentBuffer struct { + bytes.Buffer + indent string + indentCount int + comma bool +} + +func (b *indentBuffer) start() error { + if b.indentCount >= 0 { + b.indentCount++ + return b.newLine(false) + } + return nil +} + +func (b *indentBuffer) sep() error { + if b.indentCount >= 0 { + _, err := b.WriteString(": ") + return err + } else { + return b.WriteByte(':') + } +} + +func (b *indentBuffer) end() error { + if b.indentCount >= 0 { + b.indentCount-- + return b.newLine(false) + } + return nil +} + +func (b *indentBuffer) maybeNext(first *bool) error { + if *first { + *first = false + return nil + } else { + return b.next() + } +} + +func (b *indentBuffer) next() error { + if b.indentCount >= 0 { + return b.newLine(b.comma) + } else if b.comma { + return b.WriteByte(',') + } else { + return b.WriteByte(' ') + } +} + +func (b *indentBuffer) newLine(comma bool) error { + if comma { + err := b.WriteByte(',') + if err != nil { + return err + } + } + + err := b.WriteByte('\n') + if err != nil { + return err + } + + for i := 0; i < b.indentCount; i++ { + _, err := b.WriteString(b.indent) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go new file mode 100644 index 00000000..9081965f --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go @@ -0,0 +1,1256 @@ +package dynamic + +// JSON marshalling and unmarshalling for dynamic messages + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + // link in the well-known-types that have a special JSON format + _ "google.golang.org/protobuf/types/known/anypb" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/emptypb" + _ "google.golang.org/protobuf/types/known/structpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + _ "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/jhump/protoreflect/desc" +) + +var wellKnownTypeNames = map[string]struct{}{ + "google.protobuf.Any": {}, + "google.protobuf.Empty": {}, + "google.protobuf.Duration": {}, + "google.protobuf.Timestamp": {}, + // struct.proto + "google.protobuf.Struct": {}, + "google.protobuf.Value": {}, + "google.protobuf.ListValue": {}, + // wrappers.proto + "google.protobuf.DoubleValue": {}, + "google.protobuf.FloatValue": {}, + "google.protobuf.Int64Value": {}, + "google.protobuf.UInt64Value": {}, + "google.protobuf.Int32Value": {}, + "google.protobuf.UInt32Value": {}, + "google.protobuf.BoolValue": {}, + "google.protobuf.StringValue": {}, + "google.protobuf.BytesValue": {}, +} + +// MarshalJSON serializes this message to bytes in JSON format, returning an +// error if the operation fails. The resulting bytes will be a valid UTF8 +// string. 
+// +// This method uses a compact form: no newlines, and spaces between fields and +// between field identifiers and values are elided. +// +// This method is convenient shorthand for invoking MarshalJSONPB with a default +// (zero value) marshaler: +// +// m.MarshalJSONPB(&jsonpb.Marshaler{}) +// +// So enums are serialized using enum value name strings, and values that are +// not present (including those with default/zero value for messages defined in +// "proto3" syntax) are omitted. +func (m *Message) MarshalJSON() ([]byte, error) { + return m.MarshalJSONPB(&jsonpb.Marshaler{}) +} + +// MarshalJSONIndent serializes this message to bytes in JSON format, returning +// an error if the operation fails. The resulting bytes will be a valid UTF8 +// string. +// +// This method uses a "pretty-printed" form, with each field on its own line and +// spaces between field identifiers and values. Indentation of two spaces is +// used. +// +// This method is convenient shorthand for invoking MarshalJSONPB with a default +// (zero value) marshaler: +// +// m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "}) +// +// So enums are serialized using enum value name strings, and values that are +// not present (including those with default/zero value for messages defined in +// "proto3" syntax) are omitted. +func (m *Message) MarshalJSONIndent() ([]byte, error) { + return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "}) +} + +// MarshalJSONPB serializes this message to bytes in JSON format, returning an +// error if the operation fails. The resulting bytes will be a valid UTF8 +// string. The given marshaler is used to convey options used during marshaling. +// +// If this message contains nested messages that are generated message types (as +// opposed to dynamic messages), the given marshaler is used to marshal it. +// +// When marshaling any nested messages, any jsonpb.AnyResolver configured in the +// given marshaler is augmented with knowledge of message types known to this +// message's descriptor (and its enclosing file and set of transitive +// dependencies). 
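+//
+// For example, to emit enum numbers and include fields set to their
+// default/zero values (a sketch; dm is an existing *Message):
+//
+//	js, err := dm.MarshalJSONPB(&jsonpb.Marshaler{
+//		EnumsAsInts:  true,
+//		EmitDefaults: true,
+//	})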
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) { + var b indentBuffer + b.indent = opts.Indent + if len(opts.Indent) == 0 { + b.indentCount = -1 + } + b.comma = true + if err := m.marshalJSON(&b, opts); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error { + if m == nil { + _, err := b.WriteString("null") + return err + } + if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed { + newOpts := *opts + newOpts.AnyResolver = r + opts = &newOpts + } + + if ok, err := marshalWellKnownType(m, b, opts); ok { + return err + } + + err := b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + var tags []int + if opts.EmitDefaults { + tags = m.allKnownFieldTags() + } else { + tags = m.knownFieldTags() + } + + first := true + + for _, tag := range tags { + itag := int32(tag) + fd := m.FindFieldDescriptor(itag) + + v, ok := m.values[itag] + if !ok { + if fd.GetOneOf() != nil { + // don't print defaults for fields in a oneof + continue + } + v = fd.GetDefaultValue() + } + + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldJSON(b, fd, v, opts) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + err = b.WriteByte('}') + if err != nil { + return err + } + + return nil +} + +func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) { + fqn := m.md.GetFullyQualifiedName() + if _, ok := wellKnownTypeNames[fqn]; !ok { + return false, nil + } + + msgType := proto.MessageType(fqn) + if msgType == nil { + // wtf? + panic(fmt.Sprintf("could not find registered message type for %q", fqn)) + } + + // convert dynamic message to well-known type and let jsonpb marshal it + msg := reflect.New(msgType.Elem()).Interface().(proto.Message) + if err := m.MergeInto(msg); err != nil { + return true, err + } + return true, opts.Marshal(b, msg) +} + +func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error { + var jsonName string + if opts.OrigName { + jsonName = fd.GetName() + } else { + jsonName = fd.AsFieldDescriptorProto().GetJsonName() + if jsonName == "" { + jsonName = fd.GetName() + } + } + if fd.IsExtension() { + var scope string + switch parent := fd.GetParent().(type) { + case *desc.FileDescriptor: + scope = parent.GetPackage() + default: + scope = parent.GetFullyQualifiedName() + } + if scope == "" { + jsonName = fmt.Sprintf("[%s]", jsonName) + } else { + jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName) + } + } + err := writeJsonString(b, jsonName) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return err + } + + if isNil(v) { + _, err := b.WriteString("null") + return err + } + + if fd.IsMap() { + err = b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + md := fd.GetMessageType() + vfd := md.FindFieldByNumber(2) + + mp := v.(map[interface{}]interface{}) + keys := make([]interface{}, 0, len(mp)) + for k := range mp { + keys = append(keys, k) + } + sort.Sort(sortable(keys)) + first := true + for _, mk := range keys { + mv := mp[mk] + err := b.maybeNext(&first) + if err != nil { + return err + } + + err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + return b.WriteByte('}') + + } 
else if fd.IsRepeated() { + err = b.WriteByte('[') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + sl := v.([]interface{}) + first := true + for _, slv := range sl { + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldValueJSON(b, fd, slv, opts) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + return b.WriteByte(']') + + } else { + return marshalKnownFieldValueJSON(b, fd, v, opts) + } +} + +// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64), +// bools, or strings. +type sortable []interface{} + +func (s sortable) Len() int { + return len(s) +} + +func (s sortable) Less(i, j int) bool { + vi := s[i] + vj := s[j] + switch reflect.TypeOf(vi).Kind() { + case reflect.Int32: + return vi.(int32) < vj.(int32) + case reflect.Int64: + return vi.(int64) < vj.(int64) + case reflect.Uint32: + return vi.(uint32) < vj.(uint32) + case reflect.Uint64: + return vi.(uint64) < vj.(uint64) + case reflect.String: + return vi.(string) < vj.(string) + case reflect.Bool: + return !vi.(bool) && vj.(bool) + default: + panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi))) + } +} + +func (s sortable) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func isNil(v interface{}) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() == reflect.Ptr && rv.IsNil() +} + +func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error { + rk := reflect.ValueOf(mk) + var strkey string + switch rk.Kind() { + case reflect.Bool: + strkey = strconv.FormatBool(rk.Bool()) + case reflect.Int32, reflect.Int64: + strkey = strconv.FormatInt(rk.Int(), 10) + case reflect.Uint32, reflect.Uint64: + strkey = strconv.FormatUint(rk.Uint(), 10) + case reflect.String: + strkey = rk.String() + default: + return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type()) + } + err := writeJsonString(b, strkey) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return err + } + return marshalKnownFieldValueJSON(b, vfd, mv, opts) +} + +func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Int64: + return writeJsonString(b, strconv.FormatInt(rv.Int(), 10)) + case reflect.Int32: + ed := fd.GetEnumType() + if !opts.EnumsAsInts && ed != nil { + n := int32(rv.Int()) + vd := ed.FindValueByNumber(n) + if vd == nil { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } else { + return writeJsonString(b, vd.GetName()) + } + } else { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } + case reflect.Uint64: + return writeJsonString(b, strconv.FormatUint(rv.Uint(), 10)) + case reflect.Uint32: + _, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10)) + return err + case reflect.Float32, reflect.Float64: + f := rv.Float() + var str string + if math.IsNaN(f) { + str = `"NaN"` + } else if math.IsInf(f, 1) { + str = `"Infinity"` + } else if math.IsInf(f, -1) { + str = `"-Infinity"` + } else { + var bits int + if rv.Kind() == reflect.Float32 { + bits = 32 + } else { + bits = 64 + } + str = strconv.FormatFloat(rv.Float(), 'g', -1, bits) + } + _, err := b.WriteString(str) + return err + case reflect.Bool: + _, err := b.WriteString(strconv.FormatBool(rv.Bool())) + return err + 
	case reflect.Slice:
		bstr := base64.StdEncoding.EncodeToString(rv.Bytes())
		return writeJsonString(b, bstr)
	case reflect.String:
		return writeJsonString(b, rv.String())
	default:
		// must be a message
		if isNil(v) {
			_, err := b.WriteString("null")
			return err
		}

		if dm, ok := v.(*Message); ok {
			return dm.marshalJSON(b, opts)
		}

		var err error
		if b.indentCount <= 0 || len(b.indent) == 0 {
			err = opts.Marshal(b, v.(proto.Message))
		} else {
			str, err := opts.MarshalToString(v.(proto.Message))
			if err != nil {
				return err
			}
			indent := strings.Repeat(b.indent, b.indentCount)
			pos := 0
			// add indentation prefix to each line
			for pos < len(str) {
				start := pos
				nextPos := strings.Index(str[pos:], "\n")
				if nextPos == -1 {
					nextPos = len(str)
				} else {
					nextPos = pos + nextPos + 1 // include newline
				}
				line := str[start:nextPos]
				if pos > 0 {
					_, err = b.WriteString(indent)
					if err != nil {
						return err
					}
				}
				_, err = b.WriteString(line)
				if err != nil {
					return err
				}
				pos = nextPos
			}
		}
		return err
	}
}

func writeJsonString(b *indentBuffer, s string) error {
	if sbytes, err := json.Marshal(s); err != nil {
		return err
	} else {
		_, err := b.Write(sbytes)
		return err
	}
}

// UnmarshalJSON de-serializes the message that is present, in JSON format, in
// the given bytes into this message. It first resets the current message. It
// returns an error if the given bytes do not contain a valid encoding of this
// message type in JSON format.
//
// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
// value) unmarshaler:
//
//	m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
//
// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
// will be used when parsing google.protobuf.Any messages.
func (m *Message) UnmarshalJSON(js []byte) error {
	return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
}

// UnmarshalMergeJSON de-serializes the message that is present, in JSON format,
// in the given bytes into this message. Unlike UnmarshalJSON, it does not first
// reset the message, instead merging the data in the given bytes into the
// existing data in this message.
func (m *Message) UnmarshalMergeJSON(js []byte) error {
	return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
}

// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in
// the given bytes into this message. The given unmarshaler conveys options used
// when parsing the JSON. This function first resets the current message. It
// returns an error if the given bytes do not contain a valid encoding of this
// message type in JSON format.
//
// The decoding is lenient:
// 1. The JSON can refer to fields either by their JSON name or by their
// declared name.
// 2. The JSON can use either numeric values or string names for enum values.
//
// When instantiating nested messages, if this message's associated factory
// returns a generated message type (as opposed to a dynamic message), the given
// unmarshaler is used to unmarshal it.
//
// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in
// the given unmarshaler is augmented with knowledge of message types known to
// this message's descriptor (and its enclosing file and set of transitive
// dependencies).
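//
// A minimal usage sketch (illustrative only; the descriptor md and the JSON
// field name "id" are hypothetical, not part of this file):
//
//	m := NewMessage(md) // md is a *desc.MessageDescriptor
//	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
//	if err := m.UnmarshalJSONPB(u, []byte(`{"id": 42}`)); err != nil {
//		log.Fatal(err)
//	}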
+func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error { + m.Reset() + if err := m.UnmarshalMergeJSONPB(opts, js); err != nil { + return err + } + return m.Validate() +} + +// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON +// format, in the given bytes into this message. The given unmarshaler conveys +// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not first +// reset the message, instead merging the data in the given bytes into the +// existing data in this message. +func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error { + r := newJsReader(js) + err := m.unmarshalJson(r, opts) + if err != nil { + return err + } + if t, err := r.poll(); err != io.EOF { + b, _ := ioutil.ReadAll(r.unread()) + s := fmt.Sprintf("%v%s", t, string(b)) + return fmt.Errorf("superfluous data found after JSON object: %q", s) + } + return nil +} + +func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) { + fqn := m.md.GetFullyQualifiedName() + if _, ok := wellKnownTypeNames[fqn]; !ok { + return false, nil + } + + msgType := proto.MessageType(fqn) + if msgType == nil { + // wtf? + panic(fmt.Sprintf("could not find registered message type for %q", fqn)) + } + + // extract json value from r + var js json.RawMessage + if err := json.NewDecoder(r.unread()).Decode(&js); err != nil { + return true, err + } + if err := r.skip(); err != nil { + return true, err + } + + // unmarshal into well-known type and then convert to dynamic message + msg := reflect.New(msgType.Elem()).Interface().(proto.Message) + if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil { + return true, err + } + return true, m.MergeFrom(msg) +} + +func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error { + if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed { + newOpts := *opts + newOpts.AnyResolver = r + opts = &newOpts + } + + if ok, err := unmarshalWellKnownType(m, r, opts); ok { + return err + } + + t, err := r.peek() + if err != nil { + return err + } + if t == nil { + // if json is simply "null" we do nothing + r.poll() + return nil + } + + if err := r.beginObject(); err != nil { + return err + } + + for r.hasNext() { + f, err := r.nextObjectKey() + if err != nil { + return err + } + fd := m.FindFieldDescriptorByJSONName(f) + if fd == nil { + if opts.AllowUnknownFields { + r.skip() + continue + } + return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f) + } + v, err := unmarshalJsField(fd, r, m.mf, opts) + if err != nil { + return err + } + if v != nil { + if err := mergeField(m, fd, v); err != nil { + return err + } + } else if fd.GetOneOf() != nil { + // preserve explicit null for oneof fields (this is a little odd but + // mimics the behavior of jsonpb with oneofs in generated message types) + if fd.GetMessageType() != nil { + typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName()) + if typ != nil { + // typed nil + if typ.Kind() != reflect.Ptr { + typ = reflect.PtrTo(typ) + } + v = reflect.Zero(typ).Interface() + } else { + // can't use nil dynamic message, so we just use empty one instead + v = m.mf.NewDynamicMessage(fd.GetMessageType()) + } + if err := m.setField(fd, v); err != nil { + return err + } + } else { + // not a message... 
explicit null makes no sense + return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f) + } + } else { + m.clearField(fd) + } + } + + if err := r.endObject(); err != nil { + return err + } + + return nil +} + +func isWellKnownValue(fd *desc.FieldDescriptor) bool { + return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && + fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value" +} + +func isWellKnownListValue(fd *desc.FieldDescriptor) bool { + // we look for ListValue; but we also look for Value, which can be assigned a ListValue + return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && + (fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" || + fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value") +} + +func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) { + t, err := r.peek() + if err != nil { + return nil, err + } + if t == nil && !isWellKnownValue(fd) { + // if value is null, just return nil + // (unless field is google.protobuf.Value, in which case + // we fall through to parse it as an instance where its + // underlying value is set to a NullValue) + r.poll() + return nil, nil + } + + if t == json.Delim('{') && fd.IsMap() { + entryType := fd.GetMessageType() + keyType := entryType.FindFieldByNumber(1) + valueType := entryType.FindFieldByNumber(2) + mp := map[interface{}]interface{}{} + + // TODO: if there are just two map keys "key" and "value" and they have the right type of values, + // treat this JSON object as a single map entry message. (In keeping with support of map fields as + // if they were normal repeated field of entry messages as well as supporting a transition from + // optional to repeated...) + + if err := r.beginObject(); err != nil { + return nil, err + } + for r.hasNext() { + kk, err := unmarshalJsFieldElement(keyType, r, mf, opts, false) + if err != nil { + return nil, err + } + vv, err := unmarshalJsFieldElement(valueType, r, mf, opts, true) + if err != nil { + return nil, err + } + mp[kk] = vv + } + if err := r.endObject(); err != nil { + return nil, err + } + + return mp, nil + } else if t == json.Delim('[') && !isWellKnownListValue(fd) { + // We support parsing an array, even if field is not repeated, to mimic support in proto + // binary wire format that supports changing an optional field to repeated and vice versa. + // If the field is not repeated, we only keep the last value in the array. 
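		// Illustrative example (not from the original source): given a
		// non-repeated int32 field "f", the JSON {"f": [1, 2, 3]} parses
		// successfully and leaves f == 3, since only the last element is kept.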
+ + if err := r.beginArray(); err != nil { + return nil, err + } + var sl []interface{} + var v interface{} + for r.hasNext() { + var err error + v, err = unmarshalJsFieldElement(fd, r, mf, opts, false) + if err != nil { + return nil, err + } + if fd.IsRepeated() && v != nil { + sl = append(sl, v) + } + } + if err := r.endArray(); err != nil { + return nil, err + } + if fd.IsMap() { + mp := map[interface{}]interface{}{} + for _, m := range sl { + msg := m.(*Message) + kk, err := msg.TryGetFieldByNumber(1) + if err != nil { + return nil, err + } + vv, err := msg.TryGetFieldByNumber(2) + if err != nil { + return nil, err + } + mp[kk] = vv + } + return mp, nil + } else if fd.IsRepeated() { + return sl, nil + } else { + return v, nil + } + } else { + // We support parsing a singular value, even if field is repeated, to mimic support in proto + // binary wire format that supports changing an optional field to repeated and vice versa. + // If the field is repeated, we store value as singleton slice of that one value. + + v, err := unmarshalJsFieldElement(fd, r, mf, opts, false) + if err != nil { + return nil, err + } + if v == nil { + return nil, nil + } + if fd.IsRepeated() { + return []interface{}{v}, nil + } else { + return v, nil + } + } +} + +func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler, allowNilMessage bool) (interface{}, error) { + t, err := r.peek() + if err != nil { + return nil, err + } + + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_GROUP: + + if t == nil && allowNilMessage { + // if json is simply "null" return a nil pointer + r.poll() + return nilMessage(fd.GetMessageType()), nil + } + + m := mf.NewMessage(fd.GetMessageType()) + if dm, ok := m.(*Message); ok { + if err := dm.unmarshalJson(r, opts); err != nil { + return nil, err + } + } else { + var msg json.RawMessage + if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil { + return nil, err + } + if err := r.skip(); err != nil { + return nil, err + } + if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil { + return nil, err + } + } + return m, nil + + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + if e, err := r.nextNumber(); err != nil { + return nil, err + } else { + // value could be string or number + if i, err := e.Int64(); err != nil { + // number cannot be parsed, so see if it's an enum value name + vd := fd.GetEnumType().FindValueByName(string(e)) + if vd != nil { + return vd.GetNumber(), nil + } else { + return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e) + } + } else if i > math.MaxInt32 || i < math.MinInt32 { + return nil, NumericOverflowError + } else { + return int32(i), err + } + } + + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if i, err := r.nextInt(); err != nil { + return nil, err + } else if i > math.MaxInt32 || i < math.MinInt32 { + return nil, NumericOverflowError + } else { + return int32(i), err + } + + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + return r.nextInt() + + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if i, err := r.nextUint(); err != nil { + return nil, err + } else if i > math.MaxUint32 { + return 
nil, NumericOverflowError + } else { + return uint32(i), err + } + + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + return r.nextUint() + + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if str, ok := t.(string); ok { + if str == "true" { + r.poll() // consume token + return true, err + } else if str == "false" { + r.poll() // consume token + return false, err + } + } + return r.nextBool() + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if f, err := r.nextFloat(); err != nil { + return nil, err + } else { + return float32(f), nil + } + + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return r.nextFloat() + + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return r.nextBytes() + + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return r.nextString() + + default: + return nil, fmt.Errorf("unknown field type: %v", fd.GetType()) + } +} + +type jsReader struct { + reader *bytes.Reader + dec *json.Decoder + current json.Token + peeked bool +} + +func newJsReader(b []byte) *jsReader { + reader := bytes.NewReader(b) + dec := json.NewDecoder(reader) + dec.UseNumber() + return &jsReader{reader: reader, dec: dec} +} + +func (r *jsReader) unread() io.Reader { + bufs := make([]io.Reader, 3) + var peeked []byte + if r.peeked { + if _, ok := r.current.(json.Delim); ok { + peeked = []byte(fmt.Sprintf("%v", r.current)) + } else { + peeked, _ = json.Marshal(r.current) + } + } + readerCopy := *r.reader + decCopy := *r.dec + + bufs[0] = bytes.NewReader(peeked) + bufs[1] = decCopy.Buffered() + bufs[2] = &readerCopy + return &concatReader{bufs: bufs} +} + +func (r *jsReader) hasNext() bool { + return r.dec.More() +} + +func (r *jsReader) peek() (json.Token, error) { + if r.peeked { + return r.current, nil + } + t, err := r.dec.Token() + if err != nil { + return nil, err + } + r.peeked = true + r.current = t + return t, nil +} + +func (r *jsReader) poll() (json.Token, error) { + if r.peeked { + ret := r.current + r.current = nil + r.peeked = false + return ret, nil + } + return r.dec.Token() +} + +func (r *jsReader) beginObject() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'") + return err +} + +func (r *jsReader) endObject() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'") + return err +} + +func (r *jsReader) beginArray() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['") + return err +} + +func (r *jsReader) endArray() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'") + return err +} + +func (r *jsReader) nextObjectKey() (string, error) { + return r.nextString() +} + +func (r *jsReader) nextString() (string, error) { + t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string") + if err != nil { + return "", err + } + return t.(string), nil +} + +func (r *jsReader) nextBytes() ([]byte, error) { + str, err := r.nextString() + if err != nil { + return nil, err + } + return base64.StdEncoding.DecodeString(str) +} + +func (r *jsReader) nextBool() (bool, error) { + t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean") + if err != nil { + return false, err + } + return t.(bool), nil +} + +func (r *jsReader) nextInt() (int64, error) { + n, err := r.nextNumber() + if err != nil { + return 0, err 
+ } + return n.Int64() +} + +func (r *jsReader) nextUint() (uint64, error) { + n, err := r.nextNumber() + if err != nil { + return 0, err + } + return strconv.ParseUint(string(n), 10, 64) +} + +func (r *jsReader) nextFloat() (float64, error) { + n, err := r.nextNumber() + if err != nil { + return 0, err + } + return n.Float64() +} + +func (r *jsReader) nextNumber() (json.Number, error) { + t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number") + if err != nil { + return "", err + } + switch t := t.(type) { + case json.Number: + return t, nil + case string: + return json.Number(t), nil + } + return "", fmt.Errorf("expecting a number but got %v", t) +} + +func (r *jsReader) skip() error { + t, err := r.poll() + if err != nil { + return err + } + if t == json.Delim('[') { + if err := r.skipArray(); err != nil { + return err + } + } else if t == json.Delim('{') { + if err := r.skipObject(); err != nil { + return err + } + } + return nil +} + +func (r *jsReader) skipArray() error { + for r.hasNext() { + if err := r.skip(); err != nil { + return err + } + } + if err := r.endArray(); err != nil { + return err + } + return nil +} + +func (r *jsReader) skipObject() error { + for r.hasNext() { + // skip object key + if err := r.skip(); err != nil { + return err + } + // and value + if err := r.skip(); err != nil { + return err + } + } + if err := r.endObject(); err != nil { + return err + } + return nil +} + +func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) { + t, err := r.poll() + if err != nil { + return nil, err + } + if t == nil && ifNil != nil { + return ifNil, nil + } + if !predicate(t) { + return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t) + } + return t, nil +} + +type concatReader struct { + bufs []io.Reader + curr int +} + +func (r *concatReader) Read(p []byte) (n int, err error) { + for { + if r.curr >= len(r.bufs) { + err = io.EOF + return + } + var c int + c, err = r.bufs[r.curr].Read(p) + n += c + if err != io.EOF { + return + } + r.curr++ + p = p[c:] + } +} + +// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors +// to resolve message names. It uses the given factory, which may be nil, to +// instantiate messages. The messages that it returns when resolving a type name +// may often be dynamic messages. +func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver { + return &anyResolver{mf: mf, files: files} +} + +type anyResolver struct { + mf *MessageFactory + files []*desc.FileDescriptor + ignored map[*desc.FileDescriptor]struct{} + other jsonpb.AnyResolver +} + +func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) { + if r, ok := r.(*anyResolver); ok { + if _, ok := r.ignored[f]; ok { + // if the current resolver is ignoring this file, it's because another + // (upstream) resolver is already handling it, so nothing to do + return r, false + } + for _, file := range r.files { + if file == f { + // no need to wrap! 
+ return r, false + } + } + // ignore files that will be checked by the resolver we're wrapping + // (we'll just delegate and let it search those files) + ignored := map[*desc.FileDescriptor]struct{}{} + for i := range r.ignored { + ignored[i] = struct{}{} + } + ignore(r.files, ignored) + return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true + } + return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true +} + +func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) { + for _, f := range files { + if _, ok := ignored[f]; ok { + continue + } + ignored[f] = struct{}{} + ignore(f.GetDependencies(), ignored) + } +} + +func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) { + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + + // see if the user-specified resolver is able to do the job + if r.other != nil { + msg, err := r.other.Resolve(typeUrl) + if err == nil { + return msg, nil + } + } + + // try to find the message in our known set of files + checked := map[*desc.FileDescriptor]struct{}{} + for _, f := range r.files { + md := r.findMessage(f, mname, checked) + if md != nil { + return r.mf.NewMessage(md), nil + } + } + // failing that, see if the message factory knows about this type + var ktr *KnownTypeRegistry + if r.mf != nil { + ktr = r.mf.ktr + } else { + ktr = (*KnownTypeRegistry)(nil) + } + m := ktr.CreateIfKnown(mname) + if m != nil { + return m, nil + } + + // no other resolver to fallback to? mimic default behavior + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor { + // if this is an ignored descriptor, skip + if _, ok := r.ignored[fd]; ok { + return nil + } + + // bail if we've already checked this file + if _, ok := checked[fd]; ok { + return nil + } + checked[fd] = struct{}{} + + // see if this file has the message + md := fd.FindMessage(msgName) + if md != nil { + return md + } + + // if not, recursively search the file's imports + for _, dep := range fd.GetDependencies() { + md = r.findMessage(dep, msgName, checked) + if md != nil { + return md + } + } + return nil +} + +var _ jsonpb.AnyResolver = (*anyResolver)(nil) diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go new file mode 100644 index 00000000..69969fc5 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go @@ -0,0 +1,131 @@ +//go:build !go1.12 +// +build !go1.12 + +package dynamic + +import ( + "reflect" + + "github.com/jhump/protoreflect/desc" +) + +// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively +// iterate a map. (We can be more efficient in Go 1.12 and up...) + +func mapsEqual(a, b reflect.Value) bool { + if a.Len() != b.Len() { + return false + } + if a.Len() == 0 && b.Len() == 0 { + // Optimize the case where maps are frequently empty because MapKeys() + // function allocates heavily. 
+ return true + } + + for _, k := range a.MapKeys() { + av := a.MapIndex(k) + bv := b.MapIndex(k) + if !bv.IsValid() { + return false + } + if !fieldsEqual(av.Interface(), bv.Interface()) { + return false + } + } + return true +} + +func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) { + // make a defensive copy while we check the contents + // (also converts to map[interface{}]interface{} if it's some other type) + keyField := fd.GetMessageType().GetFields()[0] + valField := fd.GetMessageType().GetFields()[1] + m := map[interface{}]interface{}{} + for _, k := range val.MapKeys() { + if k.Kind() == reflect.Interface { + // unwrap it + k = reflect.ValueOf(k.Interface()) + } + kk, err := validElementFieldValueForRv(keyField, k, false) + if err != nil { + return nil, err + } + v := val.MapIndex(k) + if v.Kind() == reflect.Interface { + // unwrap it + v = reflect.ValueOf(v.Interface()) + } + vv, err := validElementFieldValueForRv(valField, v, true) + if err != nil { + return nil, err + } + m[kk] = vv + } + return m, nil +} + +func canConvertMap(src reflect.Value, target reflect.Type) bool { + kt := target.Key() + vt := target.Elem() + for _, k := range src.MapKeys() { + if !canConvert(k, kt) { + return false + } + if !canConvert(src.MapIndex(k), vt) { + return false + } + } + return true +} + +func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error { + tkt := targetType.Key() + tvt := targetType.Elem() + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + skt := k.Type() + svt := v.Type() + var nk, nv reflect.Value + if tkt == skt { + nk = k + } else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt { + nk = k.Addr() + } else { + nk = reflect.New(tkt).Elem() + if err := mergeVal(k, nk, deterministic); err != nil { + return err + } + } + if tvt == svt { + nv = v + } else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt { + nv = v.Addr() + } else { + nv = reflect.New(tvt).Elem() + if err := mergeVal(v, nv, deterministic); err != nil { + return err + } + } + if target.IsNil() { + target.Set(reflect.MakeMap(targetType)) + } + target.SetMapIndex(nk, nv) + } + return nil +} + +func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error { + for _, k := range rv.MapKeys() { + if k.Kind() == reflect.Interface && !k.IsNil() { + k = k.Elem() + } + v := rv.MapIndex(k) + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go new file mode 100644 index 00000000..fb353cfc --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go @@ -0,0 +1,139 @@ +//go:build go1.12 +// +build go1.12 + +package dynamic + +import ( + "reflect" + + "github.com/jhump/protoreflect/desc" +) + +// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate +// over maps more efficiently than using reflect.Value.MapKeys. 
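//
// A rough sketch of the difference (illustrative only; v is an assumed
// reflect.Value holding a map):
//
//	// pre-1.12: MapKeys materializes a []reflect.Value of all keys up front
//	for _, k := range v.MapKeys() {
//		_ = v.MapIndex(k)
//	}
//
//	// 1.12+: MapRange iterates without building the key slice
//	for iter := v.MapRange(); iter.Next(); {
//		_, _ = iter.Key(), iter.Value()
//	}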
+
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty
+		return true
+	}
+
+	iter := a.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		av := iter.Value()
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	iter := val.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validElementFieldValueForRv(keyField, k, false)
+		if err != nil {
+			return nil, err
+		}
+		v := iter.Value()
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validElementFieldValueForRv(valField, v, true)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		if !canConvert(iter.Key(), kt) {
+			return false
+		}
+		if !canConvert(iter.Value(), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk, deterministic); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv, deterministic); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	iter := rv.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
new file mode 100644
index 00000000..ce727fd5
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
@@ -0,0 +1,100 @@
+package dynamic
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// Merge merges the given source message into the given destination message. Use
+// this instead of proto.Merge when one or both of the messages might be a
+// dynamic message.
+// If there is a problem merging the messages, such as the
+// two messages having different types, then this function will panic (just as
+// proto.Merge does).
+func Merge(dst, src proto.Message) {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			panic(err.Error())
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			panic(err.Error())
+		}
+	} else {
+		proto.Merge(dst, src)
+	}
+}
+
+// TryMerge merges the given source message into the given destination message.
+// You can use this instead of proto.Merge when one or both of the messages
+// might be a dynamic message. Unlike proto.Merge, this function will return an
+// error on failure instead of panic'ing.
+func TryMerge(dst, src proto.Message) error {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			return err
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			return err
+		}
+	} else {
+		// proto.Merge panics on bad input, so we first verify
+		// inputs and return error instead of panic
+		out := reflect.ValueOf(dst)
+		if out.IsNil() {
+			return errors.New("proto: nil destination")
+		}
+		in := reflect.ValueOf(src)
+		if in.Type() != out.Type() {
+			return errors.New("proto: type mismatch")
+		}
+		proto.Merge(dst, src)
+	}
+	return nil
+}
+
+func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error {
+	rv := reflect.ValueOf(val)
+
+	if fd.IsMap() && rv.Kind() == reflect.Map {
+		return mergeMapField(m, fd, rv)
+	}
+
+	if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes {
+		for i := 0; i < rv.Len(); i++ {
+			e := rv.Index(i)
+			if e.Kind() == reflect.Interface && !e.IsNil() {
+				e = e.Elem()
+			}
+			if err := m.addRepeatedField(fd, e.Interface()); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	if fd.IsRepeated() {
+		return m.addRepeatedField(fd, val)
+	} else if fd.GetMessageType() == nil {
+		return m.setField(fd, val)
+	}
+
+	// it's a message type, so we want to merge contents
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+
+	existing, _ := m.doGetField(fd, true)
+	if existing != nil && !reflect.ValueOf(existing).IsNil() {
+		return TryMerge(existing.(proto.Message), val.(proto.Message))
+	}
+
+	// no existing message, so just set field
+	m.internalSetField(fd, val)
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
new file mode 100644
index 00000000..683e7b33
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -0,0 +1,207 @@
+package dynamic
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// MessageFactory can be used to create new empty message objects. A default instance
+// (without extension registry or known-type registry specified) will always return
+// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types.
+// The well-known types include primitive wrapper types and a handful of other special
+// types defined in standard protobuf definitions, like Any, Duration, and Timestamp.
+type MessageFactory struct {
+	er  *ExtensionRegistry
+	ktr *KnownTypeRegistry
+}
+
+// NewMessageFactoryWithExtensionRegistry creates a new message factory where any
+// dynamic messages produced will use the given extension registry to recognize and
+// parse extension fields.
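//
// For example (a hedged sketch; md is an assumed *desc.MessageDescriptor, not
// defined in this file):
//
//	er := NewExtensionRegistryWithDefaults()
//	mf := NewMessageFactoryWithExtensionRegistry(er)
//	msg := mf.NewMessage(md) // typically a dynamic message that can parse known extensions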
+func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory { + return NewMessageFactoryWithRegistries(er, nil) +} + +// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the +// known types, per the given registry, will be returned as normal protobuf messages +// (e.g. generated structs, instead of dynamic messages). +func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory { + return NewMessageFactoryWithRegistries(nil, ktr) +} + +// NewMessageFactoryWithDefaults creates a new message factory where all "default" types +// (those for which protoc-generated code is statically linked into the Go program) are +// known types. If any dynamic messages are produced, they will recognize and parse all +// "default" extension fields. This is the equivalent of: +// +// NewMessageFactoryWithRegistries( +// NewExtensionRegistryWithDefaults(), +// NewKnownTypeRegistryWithDefaults()) +func NewMessageFactoryWithDefaults() *MessageFactory { + return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults()) +} + +// NewMessageFactoryWithRegistries creates a new message factory with the given extension +// and known type registries. +func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory { + return &MessageFactory{ + er: er, + ktr: ktr, + } +} + +// NewMessage creates a new empty message that corresponds to the given descriptor. +// If the given descriptor describes a "known type" then that type is instantiated. +// Otherwise, an empty dynamic message is returned. +func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message { + var ktr *KnownTypeRegistry + if f != nil { + ktr = f.ktr + } + if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil { + return m + } + return NewMessageWithMessageFactory(md, f) +} + +// NewDynamicMessage creates a new empty dynamic message that corresponds to the given +// descriptor. This is like f.NewMessage(md) except the known type registry is not +// consulted so the return value is always a dynamic message. +// +// This is also like dynamic.NewMessage(md) except that the returned message will use +// this factory when creating other messages, like during de-serialization of fields +// that are themselves message types. +func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message { + return NewMessageWithMessageFactory(md, f) +} + +// GetKnownTypeRegistry returns the known type registry that this factory uses to +// instantiate known (e.g. generated) message types. +func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry { + if f == nil { + return nil + } + return f.ktr +} + +// GetExtensionRegistry returns the extension registry that this factory uses to +// create dynamic messages. The registry is used by dynamic messages to recognize +// and parse extension fields during de-serialization. +func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry { + if f == nil { + return nil + } + return f.er +} + +type wkt interface { + XXX_WellKnownType() string +} + +var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem() + +// KnownTypeRegistry is a registry of known message types, as identified by their +// fully-qualified name. A known message type is one for which a protoc-generated +// struct exists, so a dynamic message is not necessary to represent it. 
A +// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated +// struct or a dynamic message. The zero-value registry (including the behavior of +// a nil pointer) only knows about the "well-known types" in protobuf. These +// include only the wrapper types and a handful of other special types like Any, +// Duration, and Timestamp. +type KnownTypeRegistry struct { + excludeWkt bool + includeDefault bool + mu sync.RWMutex + types map[string]reflect.Type +} + +// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all +// "default" types (those for which protoc-generated code is statically linked +// into the Go program). +func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry { + return &KnownTypeRegistry{includeDefault: true} +} + +// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not* +// include the "well-known types" in protobuf. So even well-known types would be +// represented by a dynamic message. +func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry { + return &KnownTypeRegistry{excludeWkt: true} +} + +// AddKnownType adds the types of the given messages as known types. +func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) { + r.mu.Lock() + defer r.mu.Unlock() + if r.types == nil { + r.types = map[string]reflect.Type{} + } + for _, kt := range kts { + r.types[proto.MessageName(kt)] = reflect.TypeOf(kt) + } +} + +// CreateIfKnown will construct an instance of the given message if it is a known type. +// If the given name is unknown, nil is returned. +func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message { + msgType := r.GetKnownType(messageName) + if msgType == nil { + return nil + } + + if msgType.Kind() == reflect.Ptr { + return reflect.New(msgType.Elem()).Interface().(proto.Message) + } else { + return reflect.New(msgType).Elem().Interface().(proto.Message) + } +} + +func isWellKnownType(t reflect.Type) bool { + if t.Implements(typeOfWkt) { + return true + } + if msg, ok := reflect.Zero(t).Interface().(proto.Message); ok { + name := proto.MessageName(msg) + _, ok := wellKnownTypeNames[name] + return ok + } + return false +} + +// GetKnownType will return the reflect.Type for the given message name if it is +// known. If it is not known, nil is returned. 
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type { + if r == nil { + // a nil registry behaves the same as zero value instance: only know of well-known types + t := proto.MessageType(messageName) + if t != nil && isWellKnownType(t) { + return t + } + return nil + } + + if r.includeDefault { + t := proto.MessageType(messageName) + if t != nil && isMessage(t) { + return t + } + } else if !r.excludeWkt { + t := proto.MessageType(messageName) + if t != nil && isWellKnownType(t) { + return t + } + } + + r.mu.RLock() + defer r.mu.RUnlock() + return r.types[messageName] +} + +func isMessage(t reflect.Type) bool { + _, ok := reflect.Zero(t).Interface().(proto.Message) + return ok +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go new file mode 100644 index 00000000..5680dc2d --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go @@ -0,0 +1,1177 @@ +package dynamic + +// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format + +import ( + "bytes" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "text/scanner" + "unicode" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/codec" + "github.com/jhump/protoreflect/desc" +) + +// MarshalText serializes this message to bytes in the standard text format, +// returning an error if the operation fails. The resulting bytes will be a +// valid UTF8 string. +// +// This method uses a compact form: no newlines, and spaces between field +// identifiers and values are elided. +func (m *Message) MarshalText() ([]byte, error) { + var b indentBuffer + b.indentCount = -1 // no indentation + if err := m.marshalText(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// MarshalTextIndent serializes this message to bytes in the standard text +// format, returning an error if the operation fails. The resulting bytes will +// be a valid UTF8 string. +// +// This method uses a "pretty-printed" form, with each field on its own line and +// spaces between field identifiers and values. +func (m *Message) MarshalTextIndent() ([]byte, error) { + var b indentBuffer + b.indent = " " // TODO: option for indent? + if err := m.marshalText(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func (m *Message) marshalText(b *indentBuffer) error { + // TODO: option for emitting extended Any format? 
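	// Illustrative note (not in the original): with indentCount == -1, as set
	// by MarshalText, the output is compact, e.g. `name:"a" id:1`; with an
	// indent string set, as in MarshalTextIndent, each field lands on its own line.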
+ first := true + // first the known fields + for _, tag := range m.knownFieldTags() { + itag := int32(tag) + v := m.values[itag] + fd := m.FindFieldDescriptor(itag) + if fd.IsMap() { + md := fd.GetMessageType() + kfd := md.FindFieldByNumber(1) + vfd := md.FindFieldByNumber(2) + mp := v.(map[interface{}]interface{}) + keys := make([]interface{}, 0, len(mp)) + for k := range mp { + keys = append(keys, k) + } + sort.Sort(sortable(keys)) + for _, mk := range keys { + mv := mp[mk] + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv) + if err != nil { + return err + } + } + } else if fd.IsRepeated() { + sl := v.([]interface{}) + for _, slv := range sl { + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldText(b, fd, slv) + if err != nil { + return err + } + } + } else { + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldText(b, fd, v) + if err != nil { + return err + } + } + } + // then the unknown fields + for _, tag := range m.unknownFieldTags() { + itag := int32(tag) + ufs := m.unknownFields[itag] + for _, uf := range ufs { + err := b.maybeNext(&first) + if err != nil { + return err + } + _, err = fmt.Fprintf(b, "%d", tag) + if err != nil { + return err + } + if uf.Encoding == proto.WireStartGroup { + err = b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + in := codec.NewBuffer(uf.Contents) + err = marshalUnknownGroupText(b, in, true) + if err != nil { + return err + } + err = b.end() + if err != nil { + return err + } + err = b.WriteByte('}') + if err != nil { + return err + } + } else { + err = b.sep() + if err != nil { + return err + } + if uf.Encoding == proto.WireBytes { + err = writeString(b, string(uf.Contents)) + if err != nil { + return err + } + } else { + _, err = b.WriteString(strconv.FormatUint(uf.Value, 10)) + if err != nil { + return err + } + } + } + } + } + return nil +} + +func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error { + var name string + if fd.IsExtension() { + name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName()) + } else { + name = fd.GetName() + } + _, err := b.WriteString(name) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return err + } + + err = b.WriteByte('<') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + err = marshalKnownFieldText(b, kfd, mk) + if err != nil { + return err + } + err = b.next() + if err != nil { + return err + } + if !isNil(mv) { + err = marshalKnownFieldText(b, vfd, mv) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + return b.WriteByte('>') +} + +func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error { + group := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP + if group { + var name string + if fd.IsExtension() { + name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName()) + } else { + name = fd.GetMessageType().GetName() + } + _, err := b.WriteString(name) + if err != nil { + return err + } + } else { + var name string + if fd.IsExtension() { + name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName()) + } else { + name = fd.GetName() + } + _, err := b.WriteString(name) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return 
err + } + } + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Int32, reflect.Int64: + ed := fd.GetEnumType() + if ed != nil { + n := int32(rv.Int()) + vd := ed.FindValueByNumber(n) + if vd == nil { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } else { + _, err := b.WriteString(vd.GetName()) + return err + } + } else { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } + case reflect.Uint32, reflect.Uint64: + _, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10)) + return err + case reflect.Float32, reflect.Float64: + f := rv.Float() + var str string + if math.IsNaN(f) { + str = "nan" + } else if math.IsInf(f, 1) { + str = "inf" + } else if math.IsInf(f, -1) { + str = "-inf" + } else { + var bits int + if rv.Kind() == reflect.Float32 { + bits = 32 + } else { + bits = 64 + } + str = strconv.FormatFloat(rv.Float(), 'g', -1, bits) + } + _, err := b.WriteString(str) + return err + case reflect.Bool: + _, err := b.WriteString(strconv.FormatBool(rv.Bool())) + return err + case reflect.Slice: + return writeString(b, string(rv.Bytes())) + case reflect.String: + return writeString(b, rv.String()) + default: + var err error + if group { + err = b.WriteByte('{') + } else { + err = b.WriteByte('<') + } + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + // must be a message + if dm, ok := v.(*Message); ok { + err = dm.marshalText(b) + if err != nil { + return err + } + } else { + err = proto.CompactText(b, v.(proto.Message)) + if err != nil { + return err + } + } + err = b.end() + if err != nil { + return err + } + if group { + return b.WriteByte('}') + } else { + return b.WriteByte('>') + } + } +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(b *indentBuffer, s string) error { + // use WriteByte here to get any needed indent + if err := b.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = b.WriteString("\\n") + case '\r': + _, err = b.WriteString("\\r") + case '\t': + _, err = b.WriteString("\\t") + case '"': + _, err = b.WriteString("\\\"") + case '\\': + _, err = b.WriteString("\\\\") + default: + if c >= 0x20 && c < 0x7f { + err = b.WriteByte(c) + } else { + _, err = fmt.Fprintf(b, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return b.WriteByte('"') +} + +func marshalUnknownGroupText(b *indentBuffer, in *codec.Buffer, topLevel bool) error { + first := true + for { + if in.EOF() { + if topLevel { + return nil + } + // this is a nested message: we are expecting an end-group tag, not EOF! 
+ return io.ErrUnexpectedEOF + } + tag, wireType, err := in.DecodeTagAndWireType() + if err != nil { + return err + } + if wireType == proto.WireEndGroup { + return nil + } + err = b.maybeNext(&first) + if err != nil { + return err + } + _, err = fmt.Fprintf(b, "%d", tag) + if err != nil { + return err + } + if wireType == proto.WireStartGroup { + err = b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + err = marshalUnknownGroupText(b, in, false) + if err != nil { + return err + } + err = b.end() + if err != nil { + return err + } + err = b.WriteByte('}') + if err != nil { + return err + } + continue + } else { + err = b.sep() + if err != nil { + return err + } + if wireType == proto.WireBytes { + contents, err := in.DecodeRawBytes(false) + if err != nil { + return err + } + err = writeString(b, string(contents)) + if err != nil { + return err + } + } else { + var v uint64 + switch wireType { + case proto.WireVarint: + v, err = in.DecodeVarint() + case proto.WireFixed32: + v, err = in.DecodeFixed32() + case proto.WireFixed64: + v, err = in.DecodeFixed64() + default: + return proto.ErrInternalBadWireType + } + if err != nil { + return err + } + _, err = b.WriteString(strconv.FormatUint(v, 10)) + if err != nil { + return err + } + } + } + } +} + +// UnmarshalText de-serializes the message that is present, in text format, in +// the given bytes into this message. It first resets the current message. It +// returns an error if the given bytes do not contain a valid encoding of this +// message type in the standard text format +func (m *Message) UnmarshalText(text []byte) error { + m.Reset() + if err := m.UnmarshalMergeText(text); err != nil { + return err + } + return m.Validate() +} + +// UnmarshalMergeText de-serializes the message that is present, in text format, +// in the given bytes into this message. Unlike UnmarshalText, it does not first +// reset the message, instead merging the data in the given bytes into the +// existing data in this message. 
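//
// A small usage sketch (hedged; the descriptor md and the field names "name"
// and "id" are hypothetical):
//
//	m := NewMessage(md)
//	_ = m.UnmarshalText([]byte(`name: "a"`)) // resets, then parses
//	if err := m.UnmarshalMergeText([]byte(`id: 1`)); err != nil {
//		log.Fatal(err)
//	}
//	// m now has both name and id set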
+func (m *Message) UnmarshalMergeText(text []byte) error { + return m.unmarshalText(newReader(text), tokenEOF) +} + +func (m *Message) unmarshalText(tr *txtReader, end tokenType) error { + for { + tok := tr.next() + if tok.tokTyp == end { + return nil + } + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + var fd *desc.FieldDescriptor + var extendedAnyType *desc.MessageDescriptor + if tok.tokTyp == tokenInt { + // tag number (indicates unknown field) + tag, err := strconv.ParseInt(tok.val.(string), 10, 32) + if err != nil { + return err + } + itag := int32(tag) + fd = m.FindFieldDescriptor(itag) + if fd == nil { + // can't parse the value w/out field descriptor, so skip it + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if tok.tokTyp == tokenOpenBrace { + if err := skipMessageText(tr, true); err != nil { + return err + } + } else if tok.tokTyp == tokenColon { + if err := skipFieldValueText(tr); err != nil { + return err + } + } else { + return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt) + } + tok = tr.peek() + if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + continue + } + } else { + fieldName, err := unmarshalFieldNameText(tr, tok) + if err != nil { + return err + } + fd = m.FindFieldDescriptorByName(fieldName) + if fd == nil { + // See if it's a group name + for _, field := range m.md.GetFields() { + if field.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName { + fd = field + break + } + } + if fd == nil { + // maybe this is an extended Any + if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") { + // strip surrounding "[" and "]" and extract type name from URL + typeUrl := fieldName[1 : len(fieldName)-1] + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + // TODO: add a way to weave an AnyResolver to this point + extendedAnyType = findMessageDescriptor(mname, m.md.GetFile()) + if extendedAnyType == nil { + return textError(tok, "could not parse Any with unknown type URL %q", fieldName) + } + // field 1 is "type_url" + typeUrlField := m.md.FindFieldByNumber(1) + if err := m.TrySetField(typeUrlField, typeUrl); err != nil { + return err + } + } else { + // TODO: add a flag to just ignore unrecognized field names + return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName()) + } + } + } + } + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + if extendedAnyType != nil { + // consume optional colon; make sure this is a "start message" token + if tok.tokTyp == tokenColon { + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + } + if tok.tokTyp.EndToken() == tokenError { + return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt) + } + + // TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it + g := m.mf.NewDynamicMessage(extendedAnyType) + if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil { + return err + } + // now we marshal the message to bytes and store in the Any + b, err := g.Marshal() + if err != nil { + return err + } + // field 2 is "value" + anyValueField := m.md.FindFieldByNumber(2) + if err := m.TrySetField(anyValueField, b); err != nil { + return err + } + + } else if (fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP || + 
fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) && + tok.tokTyp.EndToken() != tokenError { + + // TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it + g := m.mf.NewDynamicMessage(fd.GetMessageType()) + if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil { + return err + } + if fd.IsRepeated() { + if err := m.TryAddRepeatedField(fd, g); err != nil { + return err + } + } else { + if err := m.TrySetField(fd, g); err != nil { + return err + } + } + } else { + if tok.tokTyp != tokenColon { + return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt) + } + if err := m.unmarshalFieldValueText(fd, tr); err != nil { + return err + } + } + tok = tr.peek() + if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + } +} +func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor { + md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{}) + if md == nil { + // couldn't find it; see if we have this message linked in + md, _ = desc.LoadMessageDescriptor(name) + } + return md +} + +func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor { + if _, ok := seen[fd]; ok { + // already checked this file + return nil + } + seen[fd] = struct{}{} + md := fd.FindMessage(name) + if md != nil { + return md + } + // not in this file so recursively search its deps + for _, dep := range fd.GetDependencies() { + md = findMessageInTransitiveDeps(name, dep, seen) + if md != nil { + return md + } + } + // couldn't find it + return nil +} + +func textError(tok *token, format string, args ...interface{}) error { + var msg string + if tok.tokTyp == tokenError { + msg = tok.val.(error).Error() + } else { + msg = fmt.Sprintf(format, args...) 
+ } + return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg) +} + +type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error + +func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error { + var set setFunction + if fd.IsRepeated() { + set = (*Message).addRepeatedField + } else { + set = mergeField + } + tok := tr.peek() + if tok.tokTyp == tokenOpenBracket { + tr.next() // consume tok + for { + if err := m.unmarshalFieldElementText(fd, tr, set); err != nil { + return err + } + tok = tr.peek() + if tok.tokTyp == tokenCloseBracket { + tr.next() // consume tok + return nil + } else if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + } + } + return m.unmarshalFieldElementText(fd, tr, set) +} + +func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error { + tok := tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + + var expected string + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if tok.tokTyp == tokenIdent { + if tok.val.(string) == "true" { + return set(m, fd, true) + } else if tok.val.(string) == "false" { + return set(m, fd, false) + } + } + expected = "boolean value" + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + if tok.tokTyp == tokenString { + return set(m, fd, []byte(tok.val.(string))) + } + expected = "bytes string value" + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + if tok.tokTyp == tokenString { + return set(m, fd, tok.val) + } + expected = "string value" + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + switch tok.tokTyp { + case tokenFloat: + return set(m, fd, float32(tok.val.(float64))) + case tokenInt: + if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil { + return err + } else { + return set(m, fd, float32(f)) + } + case tokenIdent: + ident := strings.ToLower(tok.val.(string)) + if ident == "inf" { + return set(m, fd, float32(math.Inf(1))) + } else if ident == "nan" { + return set(m, fd, float32(math.NaN())) + } + case tokenMinus: + peeked := tr.peek() + if peeked.tokTyp == tokenIdent { + ident := strings.ToLower(peeked.val.(string)) + if ident == "inf" { + tr.next() // consume peeked token + return set(m, fd, float32(math.Inf(-1))) + } + } + } + expected = "float value" + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + switch tok.tokTyp { + case tokenFloat: + return set(m, fd, tok.val) + case tokenInt: + if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil { + return err + } else { + return set(m, fd, f) + } + case tokenIdent: + ident := strings.ToLower(tok.val.(string)) + if ident == "inf" { + return set(m, fd, math.Inf(1)) + } else if ident == "nan" { + return set(m, fd, math.NaN()) + } + case tokenMinus: + peeked := tr.peek() + if peeked.tokTyp == tokenIdent { + ident := strings.ToLower(peeked.val.(string)) + if ident == "inf" { + tr.next() // consume peeked token + return set(m, fd, math.Inf(-1)) + } + } + } + expected = "float value" + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if tok.tokTyp == tokenInt { + if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil { + return err + } else { + return set(m, fd, int32(i)) + } + } + expected = "int value" + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + if tok.tokTyp 
== tokenInt { + if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil { + return err + } else { + return set(m, fd, i) + } + } + expected = "int value" + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if tok.tokTyp == tokenInt { + if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil { + return err + } else { + return set(m, fd, uint32(i)) + } + } + expected = "unsigned int value" + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + if tok.tokTyp == tokenInt { + if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil { + return err + } else { + return set(m, fd, i) + } + } + expected = "unsigned int value" + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + if tok.tokTyp == tokenIdent { + // TODO: add a flag to just ignore unrecognized enum value names? + vd := fd.GetEnumType().FindValueByName(tok.val.(string)) + if vd != nil { + return set(m, fd, vd.GetNumber()) + } + } else if tok.tokTyp == tokenInt { + if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil { + return err + } else { + return set(m, fd, int32(i)) + } + } + expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName()) + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_GROUP: + + endTok := tok.tokTyp.EndToken() + if endTok != tokenError { + dm := m.mf.NewDynamicMessage(fd.GetMessageType()) + if err := dm.unmarshalText(tr, endTok); err != nil { + return err + } + // TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use + // proto package to unmarshal it. But the text parser isn't particularly amenable + // to that, so we instead convert a dynamic message to a generated one if the + // known-type registry knows about the generated type... 
+ var ktr *KnownTypeRegistry + if m.mf != nil { + ktr = m.mf.ktr + } + pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName()) + if pm != nil { + if err := dm.ConvertTo(pm); err != nil { + return set(m, fd, pm) + } + } + return set(m, fd, dm) + } + expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName()) + default: + return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType()) + } + + // if we get here, token was wrong type; create error message + var article string + if strings.Contains("aieou", expected[0:1]) { + article = "an" + } else { + article = "a" + } + return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt) +} + +func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) { + if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen { + // extension name + var closeType tokenType + var closeChar string + if tok.tokTyp == tokenOpenBracket { + closeType = tokenCloseBracket + closeChar = "close bracket ']'" + } else { + closeType = tokenCloseParen + closeChar = "close paren ')'" + } + // must be followed by an identifier + idents := make([]string, 0, 1) + for { + tok = tr.next() + if tok.tokTyp == tokenEOF { + return "", io.ErrUnexpectedEOF + } else if tok.tokTyp != tokenIdent { + return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt) + } + idents = append(idents, tok.val.(string)) + // and then close bracket/paren, or "/" to keep adding URL elements to name + tok = tr.next() + if tok.tokTyp == tokenEOF { + return "", io.ErrUnexpectedEOF + } else if tok.tokTyp == closeType { + break + } else if tok.tokTyp != tokenSlash { + return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt) + } + } + return "[" + strings.Join(idents, "/") + "]", nil + } else if tok.tokTyp == tokenIdent { + // normal field name + return tok.val.(string), nil + } else { + return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt) + } +} + +func skipFieldNameText(tr *txtReader) error { + tok := tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent { + return nil + } else { + _, err := unmarshalFieldNameText(tr, tok) + return err + } +} + +func skipFieldValueText(tr *txtReader) error { + tok := tr.peek() + if tok.tokTyp == tokenOpenBracket { + tr.next() // consume tok + for { + if err := skipFieldElementText(tr); err != nil { + return err + } + tok = tr.peek() + if tok.tokTyp == tokenCloseBracket { + tr.next() // consume tok + return nil + } else if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + + } + } + return skipFieldElementText(tr) +} + +func skipFieldElementText(tr *txtReader) error { + tok := tr.next() + switch tok.tokTyp { + case tokenEOF: + return io.ErrUnexpectedEOF + case tokenInt, tokenFloat, tokenString, tokenIdent: + return nil + case tokenOpenAngle: + return skipMessageText(tr, false) + default: + return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt) + } +} + +func skipMessageText(tr *txtReader, isGroup bool) error { + for { + tok := tr.peek() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if isGroup && tok.tokTyp == tokenCloseBrace { + return nil + } else if !isGroup && tok.tokTyp == tokenCloseAngle { + return nil + } + + // field name or tag + if err := skipFieldNameText(tr); err != nil { + return err + } + + // 
field value + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if tok.tokTyp == tokenOpenBrace { + if err := skipMessageText(tr, true); err != nil { + return err + } + } else if tok.tokTyp == tokenColon { + if err := skipFieldValueText(tr); err != nil { + return err + } + } else { + return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt) + } + + tok = tr.peek() + if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + } +} + +type tokenType int + +const ( + tokenError tokenType = iota + tokenEOF + tokenIdent + tokenString + tokenInt + tokenFloat + tokenColon + tokenComma + tokenSemiColon + tokenOpenBrace + tokenCloseBrace + tokenOpenBracket + tokenCloseBracket + tokenOpenAngle + tokenCloseAngle + tokenOpenParen + tokenCloseParen + tokenSlash + tokenMinus +) + +func (t tokenType) IsSep() bool { + return t == tokenComma || t == tokenSemiColon +} + +func (t tokenType) EndToken() tokenType { + switch t { + case tokenOpenAngle: + return tokenCloseAngle + case tokenOpenBrace: + return tokenCloseBrace + default: + return tokenError + } +} + +type token struct { + tokTyp tokenType + val interface{} + txt string + pos scanner.Position +} + +type txtReader struct { + scanner scanner.Scanner + peeked token + havePeeked bool +} + +func newReader(text []byte) *txtReader { + sc := scanner.Scanner{} + sc.Init(bytes.NewReader(text)) + sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars | + scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + // identifiers are same restrictions as Go identifiers, except we also allow dots since + // we accept fully-qualified names + sc.IsIdentRune = func(ch rune, i int) bool { + return ch == '_' || unicode.IsLetter(ch) || + (i > 0 && unicode.IsDigit(ch)) || + (i > 0 && ch == '.') + } + // ignore errors; we handle them if/when we see malformed tokens + sc.Error = func(s *scanner.Scanner, msg string) {} + return &txtReader{scanner: sc} +} + +func (p *txtReader) peek() *token { + if p.havePeeked { + return &p.peeked + } + t := p.scanner.Scan() + if t == scanner.EOF { + p.peeked.tokTyp = tokenEOF + p.peeked.val = nil + p.peeked.txt = "" + p.peeked.pos = p.scanner.Position + } else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil { + p.peeked.tokTyp = tokenError + p.peeked.val = err + } + p.havePeeked = true + return &p.peeked +} + +func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error { + p.peeked.pos = pos + p.peeked.txt = text + switch t { + case scanner.Ident: + p.peeked.tokTyp = tokenIdent + p.peeked.val = text + case scanner.Int: + p.peeked.tokTyp = tokenInt + p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned + case scanner.Float: + p.peeked.tokTyp = tokenFloat + var err error + if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil { + return err + } + case scanner.Char, scanner.String: + p.peeked.tokTyp = tokenString + var err error + if p.peeked.val, err = strconv.Unquote(text); err != nil { + return err + } + case '-': // unary minus, for negative ints and floats + ch := p.scanner.Peek() + if ch < '0' || ch > '9' { + p.peeked.tokTyp = tokenMinus + p.peeked.val = '-' + } else { + t := p.scanner.Scan() + if t == scanner.EOF { + return io.ErrUnexpectedEOF + } else if t == scanner.Float { + p.peeked.tokTyp = tokenFloat + text += p.scanner.TokenText() + p.peeked.txt = text + var err error + if p.peeked.val, err = strconv.ParseFloat(text, 
64); err != nil { + p.peeked.pos = p.scanner.Position + return err + } + } else if t == scanner.Int { + p.peeked.tokTyp = tokenInt + text += p.scanner.TokenText() + p.peeked.txt = text + p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned + } else { + p.peeked.pos = p.scanner.Position + return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText()) + } + } + case ':': + p.peeked.tokTyp = tokenColon + p.peeked.val = ':' + case ',': + p.peeked.tokTyp = tokenComma + p.peeked.val = ',' + case ';': + p.peeked.tokTyp = tokenSemiColon + p.peeked.val = ';' + case '{': + p.peeked.tokTyp = tokenOpenBrace + p.peeked.val = '{' + case '}': + p.peeked.tokTyp = tokenCloseBrace + p.peeked.val = '}' + case '<': + p.peeked.tokTyp = tokenOpenAngle + p.peeked.val = '<' + case '>': + p.peeked.tokTyp = tokenCloseAngle + p.peeked.val = '>' + case '[': + p.peeked.tokTyp = tokenOpenBracket + p.peeked.val = '[' + case ']': + p.peeked.tokTyp = tokenCloseBracket + p.peeked.val = ']' + case '(': + p.peeked.tokTyp = tokenOpenParen + p.peeked.val = '(' + case ')': + p.peeked.tokTyp = tokenCloseParen + p.peeked.val = ')' + case '/': + // only allowed to separate URL components in expanded Any format + p.peeked.tokTyp = tokenSlash + p.peeked.val = '/' + default: + return fmt.Errorf("invalid character: %c", t) + } + return nil +} + +func (p *txtReader) next() *token { + t := p.peek() + if t.tokTyp != tokenEOF && t.tokTyp != tokenError { + p.havePeeked = false + } + return t +} diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go new file mode 100644 index 00000000..b0e4bbb0 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go @@ -0,0 +1,771 @@ +package grpcreflect + +import ( + "bytes" + "context" + "fmt" + "io" + "reflect" + "runtime" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc" + refv1 "github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1" + "github.com/jhump/protoreflect/internal" +) + +// If we try the v1 reflection API and get back "not implemented", we'll wait +// this long before trying v1 again. This allows a long-lived client to +// dynamically switch from v1alpha to v1 if the underlying server is updated +// to support it. But it also prevents every stream request from always trying +// v1 first: if we try it and see it fail, we shouldn't continually retry it +// if we expect it will fail again. +const durationBetweenV1Attempts = time.Hour + +// elementNotFoundError is the error returned by reflective operations where the +// server does not recognize a given file name, symbol name, or extension. 
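+// Since the type is unexported, callers are expected to detect it with
+// IsElementNotFoundError; a minimal sketch (the file name is hypothetical):
+//
+//	if _, err := client.FileByFilename("no_such.proto"); IsElementNotFoundError(err) {
+//		// the server does not know this file
+//	}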
+type elementNotFoundError struct { + name string + kind elementKind + symType symbolType // only used when kind == elementKindSymbol + tag int32 // only used when kind == elementKindExtension + + // only errors with a kind of elementKindFile will have a cause, which means + // the named file could not be resolved because of a dependency that could + // not be found, where cause describes the missing dependency + cause *elementNotFoundError +} + +type elementKind int + +const ( + elementKindSymbol elementKind = iota + elementKindFile + elementKindExtension +) + +type symbolType string + +const ( + symbolTypeService = "Service" + symbolTypeMessage = "Message" + symbolTypeEnum = "Enum" + symbolTypeUnknown = "Symbol" +) + +func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error { + return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause} +} + +func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error { + return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause} +} + +func fileNotFound(file string, cause *elementNotFoundError) error { + return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause} +} + +func (e *elementNotFoundError) Error() string { + first := true + var b bytes.Buffer + for ; e != nil; e = e.cause { + if first { + first = false + } else { + fmt.Fprint(&b, "\ncaused by: ") + } + switch e.kind { + case elementKindSymbol: + fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name) + case elementKindExtension: + fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name) + default: + fmt.Fprintf(&b, "File not found: %s", e.name) + } + } + return b.String() +} + +// IsElementNotFoundError determines if the given error indicates that a file +// name, symbol name, or extension field could not be found by the server. +func IsElementNotFoundError(err error) bool { + _, ok := err.(*elementNotFoundError) + return ok +} + +// ProtocolError is an error returned when the server sends a response of the +// wrong type. +type ProtocolError struct { + missingType reflect.Type +} + +func (p ProtocolError) Error() string { + return fmt.Sprintf("Protocol error: response was missing %v", p.missingType) +} + +type extDesc struct { + extendedMessageName string + extensionNumber int32 +} + +// Client is a client connection to a server for performing reflection calls +// and resolving remote symbols. +type Client struct { + ctx context.Context + now func() time.Time + stubV1 refv1.ServerReflectionClient + stubV1Alpha refv1alpha.ServerReflectionClient + + connMu sync.Mutex + cancel context.CancelFunc + stream refv1alpha.ServerReflection_ServerReflectionInfoClient + useV1Alpha bool + lastTriedV1 time.Time + + cacheMu sync.RWMutex + protosByName map[string]*descriptorpb.FileDescriptorProto + filesByName map[string]*desc.FileDescriptor + filesBySymbol map[string]*desc.FileDescriptor + filesByExtension map[extDesc]*desc.FileDescriptor +} + +// NewClient creates a new Client with the given root context and using the +// given RPC stub for talking to the server. +// +// Deprecated: Use NewClientV1Alpha if you are intentionally pinning the +// v1alpha version of the reflection service. Otherwise, use NewClientAuto +// instead.
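+//
+// A short sketch of the two alternatives (cc is an assumed *grpc.ClientConn,
+// ctx an assumed context.Context):
+//
+//	pinned := NewClientV1Alpha(ctx, refv1alpha.NewServerReflectionClient(cc))
+//	auto := NewClientAuto(ctx, cc) // negotiates between v1 and v1alpha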
+func NewClient(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client { + return NewClientV1Alpha(ctx, stub) +} + +// NewClientV1Alpha creates a new Client using the v1alpha version of reflection +// with the given root context and using the given RPC stub for talking to the +// server. +func NewClientV1Alpha(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client { + return newClient(ctx, nil, stub) +} + +func newClient(ctx context.Context, stubv1 refv1.ServerReflectionClient, stubv1alpha refv1alpha.ServerReflectionClient) *Client { + cr := &Client{ + ctx: ctx, + now: time.Now, + stubV1: stubv1, + stubV1Alpha: stubv1alpha, + protosByName: map[string]*descriptorpb.FileDescriptorProto{}, + filesByName: map[string]*desc.FileDescriptor{}, + filesBySymbol: map[string]*desc.FileDescriptor{}, + filesByExtension: map[extDesc]*desc.FileDescriptor{}, + } + // don't leak a grpc stream + runtime.SetFinalizer(cr, (*Client).Reset) + return cr +} + +// NewClientAuto creates a new Client that will use either v1 or v1alpha version +// of reflection (based on what the server supports) with the given root context +// and using the given client connection. +// +// It will first try the v1 version of the reflection service. If it gets back an +// "Unimplemented" error, it will fall back to using the v1alpha version. It +// will remember which version the server supports for any subsequent operations +// that need to re-invoke the streaming RPC. But, if it's a very long-lived +// client, it will periodically retry the v1 version (in case the server is +// updated to support it also). The period for these retries is every hour. +func NewClientAuto(ctx context.Context, cc grpc.ClientConnInterface) *Client { + stubv1 := refv1.NewServerReflectionClient(cc) + stubv1alpha := refv1alpha.NewServerReflectionClient(cc) + return newClient(ctx, stubv1, stubv1alpha) +} + +// TODO: We should also have a NewClientV1. However that should not refer to internal +// generated code. So it will have to wait until the grpc-go team fixes this issue: +// https://github.com/grpc/grpc-go/issues/5684 + +// FileByFilename asks the server for a file descriptor for the proto file with +// the given name. +func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) { + // hit the cache first + cr.cacheMu.RLock() + if fd, ok := cr.filesByName[filename]; ok { + cr.cacheMu.RUnlock() + return fd, nil + } + fdp, ok := cr.protosByName[filename] + cr.cacheMu.RUnlock() + // not there? see if we've downloaded the proto + if ok { + return cr.descriptorFromProto(fdp) + } + + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_FileByFilename{ + FileByFilename: filename, + }, + } + accept := func(fd *desc.FileDescriptor) bool { + return fd.GetName() == filename + } + + fd, err := cr.getAndCacheFileDescriptors(req, filename, "", accept) + if isNotFound(err) { + // file not found?
see if we can look up via alternate name + if alternate, ok := internal.StdFileAliases[filename]; ok { + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_FileByFilename{ + FileByFilename: alternate, + }, + } + fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename, accept) + if isNotFound(err) { + err = fileNotFound(filename, nil) + } + } else { + err = fileNotFound(filename, nil) + } + } else if e, ok := err.(*elementNotFoundError); ok { + err = fileNotFound(filename, e) + } + return fd, err +} + +// FileContainingSymbol asks the server for a file descriptor for the proto file +// that declares the given fully-qualified symbol. +func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) { + // hit the cache first + cr.cacheMu.RLock() + fd, ok := cr.filesBySymbol[symbol] + cr.cacheMu.RUnlock() + if ok { + return fd, nil + } + + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: symbol, + }, + } + accept := func(fd *desc.FileDescriptor) bool { + return fd.FindSymbol(symbol) != nil + } + fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept) + if isNotFound(err) { + err = symbolNotFound(symbol, symbolTypeUnknown, nil) + } else if e, ok := err.(*elementNotFoundError); ok { + err = symbolNotFound(symbol, symbolTypeUnknown, e) + } + return fd, err +} + +// FileContainingExtension asks the server for a file descriptor for the proto +// file that declares an extension with the given number for the given +// fully-qualified message name. +func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) { + // hit the cache first + cr.cacheMu.RLock() + fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}] + cr.cacheMu.RUnlock() + if ok { + return fd, nil + } + + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &refv1alpha.ExtensionRequest{ + ContainingType: extendedMessageName, + ExtensionNumber: extensionNumber, + }, + }, + } + accept := func(fd *desc.FileDescriptor) bool { + return fd.FindExtension(extendedMessageName, extensionNumber) != nil + } + fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept) + if isNotFound(err) { + err = extensionNotFound(extendedMessageName, extensionNumber, nil) + } else if e, ok := err.(*elementNotFoundError); ok { + err = extensionNotFound(extendedMessageName, extensionNumber, e) + } + return fd, err +} + +func (cr *Client) getAndCacheFileDescriptors(req *refv1alpha.ServerReflectionRequest, expectedName, alias string, accept func(*desc.FileDescriptor) bool) (*desc.FileDescriptor, error) { + resp, err := cr.send(req) + if err != nil { + return nil, err + } + + fdResp := resp.GetFileDescriptorResponse() + if fdResp == nil { + return nil, &ProtocolError{reflect.TypeOf(fdResp).Elem()} + } + + // Response can contain the result file descriptor, but also its transitive + // deps. Furthermore, protocol states that subsequent requests do not need + // to send transitive deps that have been sent in prior responses. So we + // need to cache all file descriptors and then return the first one (which + // should be the answer). If we're looking for a file by name, we can be + // smarter and make sure to grab one by name instead of just grabbing the + // first one. 
+ var fds []*descriptorpb.FileDescriptorProto + for _, fdBytes := range fdResp.FileDescriptorProto { + fd := &descriptorpb.FileDescriptorProto{} + if err = proto.Unmarshal(fdBytes, fd); err != nil { + return nil, err + } + + if expectedName != "" && alias != "" && expectedName != alias && fd.GetName() == expectedName { + // we found a file was aliased, so we need to update the proto to reflect that + fd.Name = proto.String(alias) + } + + cr.cacheMu.Lock() + // store in cache of raw descriptor protos, but don't overwrite existing protos + if existingFd, ok := cr.protosByName[fd.GetName()]; ok { + fd = existingFd + } else { + cr.protosByName[fd.GetName()] = fd + } + cr.cacheMu.Unlock() + + fds = append(fds, fd) + } + + // find the right result from the files returned + for _, fd := range fds { + result, err := cr.descriptorFromProto(fd) + if err != nil { + return nil, err + } + if accept(result) { + return result, nil + } + } + + return nil, status.Errorf(codes.NotFound, "response does not include expected file") +} + +func (cr *Client) descriptorFromProto(fd *descriptorpb.FileDescriptorProto) (*desc.FileDescriptor, error) { + deps := make([]*desc.FileDescriptor, len(fd.GetDependency())) + for i, depName := range fd.GetDependency() { + if dep, err := cr.FileByFilename(depName); err != nil { + return nil, err + } else { + deps[i] = dep + } + } + d, err := desc.CreateFileDescriptor(fd, deps...) + if err != nil { + return nil, err + } + d = cr.cacheFile(d) + return d, nil +} + +func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor { + cr.cacheMu.Lock() + defer cr.cacheMu.Unlock() + + // cache file descriptor by name, but don't overwrite existing entry + // (existing entry could come from concurrent caller) + if existingFd, ok := cr.filesByName[fd.GetName()]; ok { + return existingFd + } + cr.filesByName[fd.GetName()] = fd + + // also cache by symbols and extensions + for _, m := range fd.GetMessageTypes() { + cr.cacheMessageLocked(fd, m) + } + for _, e := range fd.GetEnumTypes() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + for _, v := range e.GetValues() { + cr.filesBySymbol[v.GetFullyQualifiedName()] = fd + } + } + for _, e := range fd.GetExtensions() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd + } + for _, s := range fd.GetServices() { + cr.filesBySymbol[s.GetFullyQualifiedName()] = fd + for _, m := range s.GetMethods() { + cr.filesBySymbol[m.GetFullyQualifiedName()] = fd + } + } + + return fd +} + +func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) { + cr.filesBySymbol[md.GetFullyQualifiedName()] = fd + for _, f := range md.GetFields() { + cr.filesBySymbol[f.GetFullyQualifiedName()] = fd + } + for _, o := range md.GetOneOfs() { + cr.filesBySymbol[o.GetFullyQualifiedName()] = fd + } + for _, e := range md.GetNestedEnumTypes() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + for _, v := range e.GetValues() { + cr.filesBySymbol[v.GetFullyQualifiedName()] = fd + } + } + for _, e := range md.GetNestedExtensions() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd + } + for _, m := range md.GetNestedMessageTypes() { + cr.cacheMessageLocked(fd, m) // recurse + } +} + +// AllExtensionNumbersForType asks the server for all known extension numbers +// for the given fully-qualified message name. 
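+// A minimal usage sketch (the message name is hypothetical):
+//
+//	nums, err := client.AllExtensionNumbersForType("my.pkg.ExtendableMessage")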
+func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) { + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: extendedMessageName, + }, + } + resp, err := cr.send(req) + if err != nil { + if isNotFound(err) { + return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil) + } + return nil, err + } + + extResp := resp.GetAllExtensionNumbersResponse() + if extResp == nil { + return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()} + } + return extResp.ExtensionNumber, nil +} + +// ListServices asks the server for the fully-qualified names of all exposed +// services. +func (cr *Client) ListServices() ([]string, error) { + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_ListServices{ + // proto doesn't indicate any purpose for this value and server impl + // doesn't actually use it... + ListServices: "*", + }, + } + resp, err := cr.send(req) + if err != nil { + return nil, err + } + + listResp := resp.GetListServicesResponse() + if listResp == nil { + return nil, &ProtocolError{reflect.TypeOf(listResp).Elem()} + } + serviceNames := make([]string, len(listResp.Service)) + for i, s := range listResp.Service { + serviceNames[i] = s.Name + } + return serviceNames, nil +} + +func (cr *Client) send(req *refv1alpha.ServerReflectionRequest) (*refv1alpha.ServerReflectionResponse, error) { + // we allow one immediate retry, in case we have a stale stream + // (e.g. closed by server) + resp, err := cr.doSend(req) + if err != nil { + return nil, err + } + + // convert error response messages into errors + errResp := resp.GetErrorResponse() + if errResp != nil { + return nil, status.Errorf(codes.Code(errResp.ErrorCode), "%s", errResp.ErrorMessage) + } + + return resp, nil +} + +func isNotFound(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + return ok && s.Code() == codes.NotFound +} + +func (cr *Client) doSend(req *refv1alpha.ServerReflectionRequest) (*refv1alpha.ServerReflectionResponse, error) { + // TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery + // (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus + // delivered in correct order.
+ cr.connMu.Lock() + defer cr.connMu.Unlock() + return cr.doSendLocked(0, nil, req) +} + +func (cr *Client) doSendLocked(attemptCount int, prevErr error, req *refv1alpha.ServerReflectionRequest) (*refv1alpha.ServerReflectionResponse, error) { + if attemptCount >= 3 && prevErr != nil { + return nil, prevErr + } + if status.Code(prevErr) == codes.Unimplemented && cr.useV1() { + cr.useV1Alpha = true + cr.lastTriedV1 = cr.now() + } + attemptCount++ + + if err := cr.initStreamLocked(); err != nil { + return nil, err + } + + if err := cr.stream.Send(req); err != nil { + if err == io.EOF { + // if send returns EOF, must call Recv to get real underlying error + _, err = cr.stream.Recv() + } + cr.resetLocked() + return cr.doSendLocked(attemptCount, err, req) + } + + resp, err := cr.stream.Recv() + if err != nil { + cr.resetLocked() + return cr.doSendLocked(attemptCount, err, req) + } + return resp, nil +} + +func (cr *Client) initStreamLocked() error { + if cr.stream != nil { + return nil + } + var newCtx context.Context + newCtx, cr.cancel = context.WithCancel(cr.ctx) + if cr.useV1Alpha == true && cr.now().Sub(cr.lastTriedV1) > durationBetweenV1Attempts { + // we're due for periodic retry of v1 + cr.useV1Alpha = false + } + if cr.useV1() { + // try the v1 API + streamv1, err := cr.stubV1.ServerReflectionInfo(newCtx) + if err == nil { + cr.stream = adaptStreamFromV1{streamv1} + return nil + } + if status.Code(err) != codes.Unimplemented { + return err + } + // oh well, fall through below to try v1alpha and update state + // so we skip straight to v1alpha next time + cr.useV1Alpha = true + cr.lastTriedV1 = cr.now() + } + var err error + cr.stream, err = cr.stubV1Alpha.ServerReflectionInfo(newCtx) + return err +} + +func (cr *Client) useV1() bool { + return !cr.useV1Alpha && cr.stubV1 != nil +} + +// Reset ensures that any active stream with the server is closed, releasing any +// resources. +func (cr *Client) Reset() { + cr.connMu.Lock() + defer cr.connMu.Unlock() + cr.resetLocked() +} + +func (cr *Client) resetLocked() { + if cr.stream != nil { + cr.stream.CloseSend() + for { + // drain the stream, this covers io.EOF too + if _, err := cr.stream.Recv(); err != nil { + break + } + } + cr.stream = nil + } + if cr.cancel != nil { + cr.cancel() + cr.cancel = nil + } +} + +// ResolveService asks the server to resolve the given fully-qualified service +// name into a service descriptor. +func (cr *Client) ResolveService(serviceName string) (*desc.ServiceDescriptor, error) { + file, err := cr.FileContainingSymbol(serviceName) + if err != nil { + return nil, setSymbolType(err, serviceName, symbolTypeService) + } + d := file.FindSymbol(serviceName) + if d == nil { + return nil, symbolNotFound(serviceName, symbolTypeService, nil) + } + if s, ok := d.(*desc.ServiceDescriptor); ok { + return s, nil + } else { + return nil, symbolNotFound(serviceName, symbolTypeService, nil) + } +} + +// ResolveMessage asks the server to resolve the given fully-qualified message +// name into a message descriptor. 
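+// For example, a sketch with a hypothetical message name:
+//
+//	md, err := client.ResolveMessage("my.pkg.MyRequest")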
+func (cr *Client) ResolveMessage(messageName string) (*desc.MessageDescriptor, error) { + file, err := cr.FileContainingSymbol(messageName) + if err != nil { + return nil, setSymbolType(err, messageName, symbolTypeMessage) + } + d := file.FindSymbol(messageName) + if d == nil { + return nil, symbolNotFound(messageName, symbolTypeMessage, nil) + } + if s, ok := d.(*desc.MessageDescriptor); ok { + return s, nil + } else { + return nil, symbolNotFound(messageName, symbolTypeMessage, nil) + } +} + +// ResolveEnum asks the server to resolve the given fully-qualified enum name +// into an enum descriptor. +func (cr *Client) ResolveEnum(enumName string) (*desc.EnumDescriptor, error) { + file, err := cr.FileContainingSymbol(enumName) + if err != nil { + return nil, setSymbolType(err, enumName, symbolTypeEnum) + } + d := file.FindSymbol(enumName) + if d == nil { + return nil, symbolNotFound(enumName, symbolTypeEnum, nil) + } + if s, ok := d.(*desc.EnumDescriptor); ok { + return s, nil + } else { + return nil, symbolNotFound(enumName, symbolTypeEnum, nil) + } +} + +func setSymbolType(err error, name string, symType symbolType) error { + if e, ok := err.(*elementNotFoundError); ok { + if e.kind == elementKindSymbol && e.name == name && e.symType == symbolTypeUnknown { + e.symType = symType + } + } + return err +} + +// ResolveEnumValues asks the server to resolve the given fully-qualified enum +// name into a map of names to numbers that represents the enum's values. +func (cr *Client) ResolveEnumValues(enumName string) (map[string]int32, error) { + enumDesc, err := cr.ResolveEnum(enumName) + if err != nil { + return nil, err + } + vals := map[string]int32{} + for _, valDesc := range enumDesc.GetValues() { + vals[valDesc.GetName()] = valDesc.GetNumber() + } + return vals, nil +} + +// ResolveExtension asks the server to resolve the given extension number and +// fully-qualified message name into a field descriptor. 
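+// For example, a sketch with a hypothetical extendee and tag number:
+//
+//	extFd, err := client.ResolveExtension("my.pkg.ExtendableMessage", 100)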
+func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) (*desc.FieldDescriptor, error) { + file, err := cr.FileContainingExtension(extendedType, extensionNumber) + if err != nil { + return nil, err + } + d := findExtension(extendedType, extensionNumber, fileDescriptorExtensions{file}) + if d == nil { + return nil, extensionNotFound(extendedType, extensionNumber, nil) + } else { + return d, nil + } +} + +func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor { + // search extensions in this scope + for _, ext := range scope.extensions() { + if ext.GetNumber() == extensionNumber && ext.GetOwner().GetFullyQualifiedName() == extendedType { + return ext + } + } + + // if not found, search nested scopes + for _, nested := range scope.nestedScopes() { + ext := findExtension(extendedType, extensionNumber, nested) + if ext != nil { + return ext + } + } + + return nil +} + +type extensionScope interface { + extensions() []*desc.FieldDescriptor + nestedScopes() []extensionScope +} + +// fileDescriptorExtensions implements extensionHolder interface on top of +// FileDescriptorProto +type fileDescriptorExtensions struct { + proto *desc.FileDescriptor +} + +func (fde fileDescriptorExtensions) extensions() []*desc.FieldDescriptor { + return fde.proto.GetExtensions() +} + +func (fde fileDescriptorExtensions) nestedScopes() []extensionScope { + scopes := make([]extensionScope, len(fde.proto.GetMessageTypes())) + for i, m := range fde.proto.GetMessageTypes() { + scopes[i] = msgDescriptorExtensions{m} + } + return scopes +} + +// msgDescriptorExtensions implements extensionHolder interface on top of +// DescriptorProto +type msgDescriptorExtensions struct { + proto *desc.MessageDescriptor +} + +func (mde msgDescriptorExtensions) extensions() []*desc.FieldDescriptor { + return mde.proto.GetNestedExtensions() +} + +func (mde msgDescriptorExtensions) nestedScopes() []extensionScope { + scopes := make([]extensionScope, len(mde.proto.GetNestedMessageTypes())) + for i, m := range mde.proto.GetNestedMessageTypes() { + scopes[i] = msgDescriptorExtensions{m} + } + return scopes +} + +type adaptStreamFromV1 struct { + refv1.ServerReflection_ServerReflectionInfoClient +} + +func (a adaptStreamFromV1) Send(request *refv1alpha.ServerReflectionRequest) error { + v1req := refv1.ToV1Request(request) + return a.ServerReflection_ServerReflectionInfoClient.Send(v1req) +} + +func (a adaptStreamFromV1) Recv() (*refv1alpha.ServerReflectionResponse, error) { + v1resp, err := a.ServerReflection_ServerReflectionInfoClient.Recv() + if err != nil { + return nil, err + } + return refv1.ToV1AlphaResponse(v1resp), nil +} diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go new file mode 100644 index 00000000..ec7bd029 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go @@ -0,0 +1,10 @@ +// Package grpcreflect provides GRPC-specific extensions to protobuf reflection. +// This includes a way to access rich service descriptors for all services that +// a GRPC server exports. +// +// Also included is an easy-to-use client for the GRPC reflection service +// (https://goo.gl/2ILAHf). This client makes it easy to ask a server (that +// supports the reflection service) for metadata on its exported services, which +// could be used to construct a dynamic client. (See the grpcdynamic package in +// this same repo for more on that.) 
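+//
+// A minimal, hedged sketch of using the client against a hypothetical server
+// address (error handling mostly elided for brevity):
+//
+//	cc, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := grpcreflect.NewClientAuto(context.Background(), cc)
+//	defer client.Reset()
+//	svcs, err := client.ListServices() // fully-qualified service names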
+package grpcreflect diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.pb.go b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 00000000..5a11bd19 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,972 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1-devel +// protoc v4.22.0 +// source: grpcreflect/internal/grpc_reflection_v1/reflection.proto + +// NOTE: This package has been changed so that if the "canonical" version of this +// proto is compiled and linked into the same program, it won't result in an init +// failure (which can happen if two different Go packages try to define the same +// proto files/types). + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. <package>.<service>[.<method>] or <package>.<type>).
FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // <package>.<type>. + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be <package>.<type>. + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
+func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. 
+ ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is <package>.<type>.
+	BaseTypeName    string  `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+	ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+}
+
+func (x *ExtensionNumberResponse) Reset() {
+	*x = ExtensionNumberResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExtensionNumberResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
+func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
+	return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+	if x != nil {
+		return x.BaseTypeName
+	}
+	return ""
+}
+
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+	if x != nil {
+		return x.ExtensionNumber
+	}
+	return nil
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+type ListServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The information of each service may be expanded in the future, so we use
+	// ServiceResponse message to encapsulate it.
+	Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *ListServiceResponse) Reset() {
+	*x = ListServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceResponse) Descriptor() ([]byte, []int) {
+	return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+	if x != nil {
+		return x.Service
+	}
+	return nil
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+type ServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Full name of a registered service, including its package name. The format
+	// is <package>.<service>
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpcreflect_internal_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x67, 0x72, 0x70, 0x63, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x6a, 0x68, 0x75, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x22, 0x86, 0x03, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, + 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, + 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, + 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, + 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x75, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, + 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x8e, 0x05, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x69, + 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x79, 0x0a, 0x18, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x6a, 0x68, + 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, + 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, + 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x16, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6a, + 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 
0x6c, 0x65, 0x63, + 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x5d, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, + 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, + 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, + 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x67, 0x0a, + 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, + 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x32, 0xb0, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x9b, 0x01, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x3e, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x3f, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, + 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData = file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: jhump.protoreflect.grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: jhump.protoreflect.grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: jhump.protoreflect.grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: jhump.protoreflect.grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: jhump.protoreflect.grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: 
jhump.protoreflect.grpc.reflection.v1.ErrorResponse +} +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> jhump.protoreflect.grpc.reflection.v1.ExtensionRequest + 0, // 1: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest + 3, // 2: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> jhump.protoreflect.grpc.reflection.v1.FileDescriptorResponse + 4, // 3: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> jhump.protoreflect.grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> jhump.protoreflect.grpc.reflection.v1.ListServiceResponse + 7, // 5: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> jhump.protoreflect.grpc.reflection.v1.ErrorResponse + 6, // 6: jhump.protoreflect.grpc.reflection.v1.ListServiceResponse.service:type_name -> jhump.protoreflect.grpc.reflection.v1.ServiceResponse + 0, // 7: jhump.protoreflect.grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest + 2, // 8: jhump.protoreflect.grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_init() } +func file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_init() { + if File_grpcreflect_internal_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i 
{ + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpcreflect_internal_grpc_reflection_v1_reflection_proto = out.File + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc = nil + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.proto b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.proto new file mode 100644 index 00000000..dac86edd --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.proto @@ -0,0 +1,150 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Service exported by server reflection. A more complete description of how
+// server reflection works can be found at
+// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
+//
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+syntax = "proto3";
+
+// NOTE: This package has been changed so that if the "canonical" version of this
+// proto is compiled and linked into the same program, it won't result in an init
+// failure (which can happen if two different Go packages try to define the same
+// proto files/types).
+package jhump.protoreflect.grpc.reflection.v1;
+
+option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1";
+option java_multiple_files = true;
+option java_package = "io.grpc.reflection.v1";
+option java_outer_classname = "ServerReflectionProto";
+
+service ServerReflection {
+  // The reflection service is structured as a bidirectional stream, ensuring
+  // all related requests go to a single server.
+  rpc ServerReflectionInfo(stream ServerReflectionRequest)
+      returns (stream ServerReflectionResponse);
+}
+
+// The message sent by the client when calling ServerReflectionInfo method.
+message ServerReflectionRequest {
+  string host = 1;
+  // To use reflection service, the client should set one of the following
+  // fields in message_request. The server distinguishes requests by their
+  // defined field and then handles them using corresponding methods.
+  oneof message_request {
+    // Find a proto file by the file name.
+    string file_by_filename = 3;
+
+    // Find the proto file that declares the given fully-qualified symbol name.
+    // This field should be a fully-qualified symbol name
+    // (e.g. <package>.<service>[.<method>] or <package>.<type>).
+    string file_containing_symbol = 4;
+
+    // Find the proto file which defines an extension extending the given
+    // message type with the given field number.
+    ExtensionRequest file_containing_extension = 5;
+
+    // Finds the tag numbers used by all known extensions of the given message
+    // type, and appends them to ExtensionNumberResponse in an undefined order.
+    // Its corresponding method is best-effort: it's not guaranteed that the
+    // reflection service will implement this method, and it's not guaranteed
+    // that this method will provide all extensions. Returns
+    // StatusCode::UNIMPLEMENTED if it's not implemented.
+    // This field should be a fully-qualified type name. The format is
+    // <package>.<type>
+    string all_extension_numbers_of_type = 6;
+
+    // List the full names of registered services. The content will not be
+    // checked.
+    string list_services = 7;
+  }
+}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+message ExtensionRequest {
+  // Fully-qualified type name. The format should be <package>.<type>
+  string containing_type = 1;
+  int32 extension_number = 2;
+}
+
+// The message sent by the server to answer ServerReflectionInfo method.
+message ServerReflectionResponse {
+  string valid_host = 1;
+  ServerReflectionRequest original_request = 2;
+  // The server sets one of the following fields according to the message_request
+  // in the request.
+  oneof message_response {
+    // This message is used to answer file_by_filename, file_containing_symbol,
+    // file_containing_extension requests with transitive dependencies.
+    // As the repeated label is not allowed in oneof fields, we use a
+    // FileDescriptorResponse message to encapsulate the repeated fields.
+    // The reflection service is allowed to avoid sending FileDescriptorProtos
+    // that were previously sent in response to earlier requests in the stream.
+    FileDescriptorResponse file_descriptor_response = 4;
+
+    // This message is used to answer all_extension_numbers_of_type requests.
+    ExtensionNumberResponse all_extension_numbers_response = 5;
+
+    // This message is used to answer list_services requests.
+    ListServiceResponse list_services_response = 6;
+
+    // This message is used when an error occurs.
+    ErrorResponse error_response = 7;
+  }
+}
+
+// Serialized FileDescriptorProto messages sent by the server answering
+// a file_by_filename, file_containing_symbol, or file_containing_extension
+// request.
+message FileDescriptorResponse {
+  // Serialized FileDescriptorProto messages. We avoid taking a dependency on
+  // descriptor.proto, which uses proto2 only features, by making them opaque
+  // bytes instead.
+  repeated bytes file_descriptor_proto = 1;
+}
+
+// A list of extension numbers sent by the server answering
+// all_extension_numbers_of_type request.
+message ExtensionNumberResponse {
+  // Full name of the base type, including the package name. The format
+  // is <package>.<type>
+  string base_type_name = 1;
+  repeated int32 extension_number = 2;
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+message ListServiceResponse {
+  // The information of each service may be expanded in the future, so we use
+  // ServiceResponse message to encapsulate it.
+  repeated ServiceResponse service = 1;
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+message ServiceResponse {
+  // Full name of a registered service, including its package name. The format
+  // is <package>.<service>
+  string name = 1;
+}
+
+// The error code and error message sent by the server when an error occurs.
+message ErrorResponse {
+  // This field uses the error codes defined in grpc::StatusCode.
+  int32 error_code = 1;
+  string error_message = 2;
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection_grpc.pb.go
new file mode 100644
index 00000000..db204d61
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection_grpc.pb.go
@@ -0,0 +1,141 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc             v4.22.0
+// source: grpcreflect/internal/grpc_reflection_v1/reflection.proto
+
+package grpc_reflection_v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// ServerReflectionClient is the client API for ServerReflection service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
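+//
+// A minimal, hedged usage sketch (conn is an assumed *grpc.ClientConn; error
+// handling is elided for brevity):
+//
+//	client := NewServerReflectionClient(conn)
+//	stream, _ := client.ServerReflectionInfo(context.Background())
+//	_ = stream.Send(&ServerReflectionRequest{
+//		MessageRequest: &ServerReflectionRequest_ListServices{ListServices: ""},
+//	})
+//	resp, _ := stream.Recv()
+//	_ = resp.GetListServicesResponse() // full names of the registered services
+//	_ = stream.CloseSend()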
+type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo", opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations must embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error + mustEmbedUnimplementedServerReflectionServer() +} + +// UnimplementedServerReflectionServer must be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} +func (UnimplementedServerReflectionServer) mustEmbedUnimplementedServerReflectionServer() {} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. 
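+//
+// A hedged sketch of the trade-off: an implementation normally embeds
+// UnimplementedServerReflectionServer so it keeps compiling when methods are
+// added, whereas asserting this interface opts out of that protection:
+//
+//	type reflectionSrv struct {
+//		UnimplementedServerReflectionServer
+//	}
+//
+//	var _ ServerReflectionServer = (*reflectionSrv)(nil)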
+type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpcreflect/internal/grpc_reflection_v1/reflection.proto", +} diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/svc_impl.go b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/svc_impl.go new file mode 100644 index 00000000..4e66d581 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/svc_impl.go @@ -0,0 +1,240 @@ +package grpc_reflection_v1 + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +func Register(svr reflection.GRPCServer) { + reflection.Register(registrarInterceptor{svr}) +} + +type registrarInterceptor struct { + svr reflection.GRPCServer +} + +func (r registrarInterceptor) RegisterService(desc *grpc.ServiceDesc, impl interface{}) { + r.svr.RegisterService(&ServerReflection_ServiceDesc, reflectImpl{svr: impl.(grpc_reflection_v1alpha.ServerReflectionServer)}) +} + +func (r registrarInterceptor) GetServiceInfo() map[string]grpc.ServiceInfo { + // HACK: We're using generated code for a proto file where we hacked the proto package + // to avoid init-time issues (for a future where the grpc module also provides the same + // protos/types). But we've rewritten the service names in the generated code, so that + // we expose the expected service (e.g. w/out the hacked package name). That will lead + // to issues trying to load/resolve descriptors for the hacked service. So we remove + // it from the service info. 
+ info := r.svr.GetServiceInfo() + delete(info, "grpc.reflection.v1.ServerReflection") + return info +} + +type reflectImpl struct { + svr grpc_reflection_v1alpha.ServerReflectionServer + UnimplementedServerReflectionServer +} + +func (r reflectImpl) ServerReflectionInfo(stream ServerReflection_ServerReflectionInfoServer) error { + return r.svr.ServerReflectionInfo(streamImpl{stream}) +} + +type streamImpl struct { + ServerReflection_ServerReflectionInfoServer +} + +func (s streamImpl) Send(response *grpc_reflection_v1alpha.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(ToV1Response(response)) +} + +func (s streamImpl) Recv() (*grpc_reflection_v1alpha.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return ToV1AlphaRequest(resp), nil +} + +func ToV1Request(v1alpha *grpc_reflection_v1alpha.ServerReflectionRequest) *ServerReflectionRequest { + var v1 ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *grpc_reflection_v1alpha.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_ListServices: + v1.MessageRequest = &ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func ToV1AlphaRequest(v1 *ServerReflectionRequest) *grpc_reflection_v1alpha.ServerReflectionRequest { + var v1alpha grpc_reflection_v1alpha.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &grpc_reflection_v1alpha.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{ + 
AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} + +func ToV1Response(v1alpha *grpc_reflection_v1alpha.ServerReflectionResponse) *ServerReflectionResponse { + var v1 ServerReflectionResponse + v1.ValidHost = v1alpha.ValidHost + if v1alpha.OriginalRequest != nil { + v1.OriginalRequest = ToV1Request(v1alpha.OriginalRequest) + } + switch mr := v1alpha.MessageResponse.(type) { + case *grpc_reflection_v1alpha.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1.MessageResponse = &ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1.MessageResponse = &ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &ServiceResponse{ + Name: svc.GetName(), + } + } + v1.MessageResponse = &ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &ListServiceResponse{ + Service: svcs, + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1.MessageResponse = &ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1 +} + +func ToV1AlphaResponse(v1 *ServerReflectionResponse) *grpc_reflection_v1alpha.ServerReflectionResponse { + var v1alpha grpc_reflection_v1alpha.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &grpc_reflection_v1alpha.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &grpc_reflection_v1alpha.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*grpc_reflection_v1alpha.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &grpc_reflection_v1alpha.ServiceResponse{ + Name: svc.GetName(), + } + 
}
+			}
+			v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_ListServicesResponse{
+				ListServicesResponse: &grpc_reflection_v1alpha.ListServiceResponse{
+					Service: svcs,
+				},
+			}
+		}
+	case *ServerReflectionResponse_ErrorResponse:
+		if mr != nil {
+			v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_ErrorResponse{
+				ErrorResponse: &grpc_reflection_v1alpha.ErrorResponse{
+					ErrorCode:    mr.ErrorResponse.GetErrorCode(),
+					ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+				},
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1alpha
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
new file mode 100644
index 00000000..7ff19127
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -0,0 +1,67 @@
+package grpcreflect
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/reflection"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// GRPCServer is the interface provided by a gRPC server. In addition to being a
+// service registrar (for registering services and handlers), it also has an
+// accessor for retrieving metadata about all registered services.
+type GRPCServer = reflection.GRPCServer
+
+// LoadServiceDescriptors loads the service descriptors for all services exposed by the
+// given GRPC server.
+func LoadServiceDescriptors(s GRPCServer) (map[string]*desc.ServiceDescriptor, error) {
+	descs := map[string]*desc.ServiceDescriptor{}
+	for name, info := range s.GetServiceInfo() {
+		file, ok := info.Metadata.(string)
+		if !ok {
+			return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", name, info.Metadata)
+		}
+		fd, err := desc.LoadFileDescriptor(file)
+		if err != nil {
+			return nil, err
+		}
+		d := fd.FindSymbol(name)
+		if d == nil {
+			return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, name)
+		}
+		sd, ok := d.(*desc.ServiceDescriptor)
+		if !ok {
+			return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, name, d)
+		}
+		descs[name] = sd
+	}
+	return descs, nil
+}
+
+// LoadServiceDescriptor loads a rich descriptor for a given service description
+// generated by protoc-gen-go. Generated code contains an unexported symbol with
+// a name like "_<Service>_serviceDesc" which is the service's description. It
+// is used internally to register a service implementation with a GRPC server.
+// But it can also be used by this package to retrieve the rich descriptor for
+// the service.
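+//
+// A hedged usage sketch (pb.MyService_ServiceDesc stands in for any generated
+// grpc.ServiceDesc; the name is hypothetical):
+//
+//	sd, err := grpcreflect.LoadServiceDescriptor(&pb.MyService_ServiceDesc)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, m := range sd.GetMethods() {
+//		fmt.Println(m.GetFullyQualifiedName())
+//	}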
+func LoadServiceDescriptor(svc *grpc.ServiceDesc) (*desc.ServiceDescriptor, error) { + file, ok := svc.Metadata.(string) + if !ok { + return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", svc.ServiceName, svc.Metadata) + } + fd, err := desc.LoadFileDescriptor(file) + if err != nil { + return nil, err + } + d := fd.FindSymbol(svc.ServiceName) + if d == nil { + return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, svc.ServiceName) + } + sd, ok := d.(*desc.ServiceDescriptor) + if !ok { + return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, svc.ServiceName, d) + } + return sd, nil +} diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go new file mode 100644 index 00000000..09f8849e --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go @@ -0,0 +1,118 @@ +package codec + +import ( + "fmt" + "io" +) + +// Buffer is a reader and a writer that wraps a slice of bytes and also +// provides API for decoding and encoding the protobuf binary format. +// +// Its operation is similar to that of a bytes.Buffer: writing pushes +// data to the end of the buffer while reading pops data from the head +// of the buffer. So the same buffer can be used to both read and write. +type Buffer struct { + buf []byte + index int + + // tmp is used when another byte slice is needed, such as when + // serializing messages, since we need to know the length before + // we can write the length prefix; by caching this, including + // after it is grown by serialization operations, we reduce the + // number of allocations needed + tmp []byte + + deterministic bool +} + +// NewBuffer creates a new buffer with the given slice of bytes as the +// buffer's initial contents. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic sets this buffer to encode messages deterministically. This +// is useful for tests. But the overhead is non-zero, so it should not likely be +// used outside of tests. When true, map fields in a message must have their +// keys sorted before serialization to ensure deterministic output. Otherwise, +// values in a map field will be serialized in map iteration order. +func (cb *Buffer) SetDeterministic(deterministic bool) { + cb.deterministic = deterministic +} + +// IsDeterministic returns whether or not this buffer is configured to encode +// messages deterministically. +func (cb *Buffer) IsDeterministic() bool { + return cb.deterministic +} + +// Reset resets this buffer back to empty. Any subsequent writes/encodes +// to the buffer will allocate a new backing slice of bytes. +func (cb *Buffer) Reset() { + cb.buf = []byte(nil) + cb.index = 0 +} + +// Bytes returns the slice of bytes remaining in the buffer. Note that +// this does not perform a copy: if the contents of the returned slice +// are modified, the modifications will be visible to subsequent reads +// via the buffer. +func (cb *Buffer) Bytes() []byte { + return cb.buf[cb.index:] +} + +// String returns the remaining bytes in the buffer as a string. +func (cb *Buffer) String() string { + return string(cb.Bytes()) +} + +// EOF returns true if there are no more bytes remaining to read. +func (cb *Buffer) EOF() bool { + return cb.index >= len(cb.buf) +} + +// Skip attempts to skip the given number of bytes in the input. 
If
+// the input has fewer bytes than the given count, io.ErrUnexpectedEOF
+// is returned and the buffer is unchanged. Otherwise, the given number
+// of bytes are skipped and nil is returned.
+func (cb *Buffer) Skip(count int) error {
+	if count < 0 {
+		return fmt.Errorf("proto: bad byte length %d", count)
+	}
+	newIndex := cb.index + count
+	if newIndex < cb.index || newIndex > len(cb.buf) {
+		return io.ErrUnexpectedEOF
+	}
+	cb.index = newIndex
+	return nil
+}
+
+// Len returns the remaining number of bytes in the buffer.
+func (cb *Buffer) Len() int {
+	return len(cb.buf) - cb.index
+}
+
+// Read implements the io.Reader interface. If there are no bytes
+// remaining in the buffer, it will return 0, io.EOF. Otherwise,
+// it reads min(len(dest), cb.Len()) bytes from input and copies
+// them into dest. It returns the number of bytes copied and a nil
+// error in this case.
+func (cb *Buffer) Read(dest []byte) (int, error) {
+	if cb.index == len(cb.buf) {
+		return 0, io.EOF
+	}
+	copied := copy(dest, cb.buf[cb.index:])
+	cb.index += copied
+	return copied, nil
+}
+
+var _ io.Reader = (*Buffer)(nil)
+
+// Write implements the io.Writer interface. It always returns
+// len(data), nil.
+func (cb *Buffer) Write(data []byte) (int, error) {
+	cb.buf = append(cb.buf, data...)
+	return len(data), nil
+}
+
+var _ io.Writer = (*Buffer)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/decode.go b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go
new file mode 100644
index 00000000..a25f680f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go
@@ -0,0 +1,346 @@
+package codec
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// ErrOverflow is returned when an integer is too large to be represented.
+var ErrOverflow = errors.New("proto: integer overflow")
+
+// ErrBadWireType is returned when decoding a wire-type from a buffer that
+// is not valid.
+var ErrBadWireType = errors.New("proto: bad wiretype")
+
+func (cb *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := cb.index
+	l := len(cb.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := cb.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			cb.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = ErrOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
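+//
+// For example (a sketch of the standard varint wire format), the two bytes
+// {0x96, 0x01} decode to 150:
+//
+//	b := NewBuffer([]byte{0x96, 0x01})
+//	v, err := b.DecodeVarint() // v == 150, err == nil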
+func (cb *Buffer) DecodeVarint() (uint64, error) { + i := cb.index + buf := cb.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + cb.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return cb.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x := uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, ErrOverflow + +done: + cb.index = i + return x, nil +} + +// DecodeTagAndWireType decodes a field tag and wire type from input. +// This reads a varint and then extracts the two fields from the varint +// value read. +func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) { + var v uint64 + v, err = cb.DecodeVarint() + if err != nil { + return + } + // low 7 bits is wire type + wireType = int8(v & 7) + // rest is int32 tag number + v = v >> 3 + if v > math.MaxInt32 { + err = fmt.Errorf("tag number out of range: %d", v) + return + } + tag = int32(v) + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := cb.index + 8 + if i < 0 || i > len(cb.buf) { + err = io.ErrUnexpectedEOF + return + } + cb.index = i + + x = uint64(cb.buf[i-8]) + x |= uint64(cb.buf[i-7]) << 8 + x |= uint64(cb.buf[i-6]) << 16 + x |= uint64(cb.buf[i-5]) << 24 + x |= uint64(cb.buf[i-4]) << 32 + x |= uint64(cb.buf[i-3]) << 40 + x |= uint64(cb.buf[i-2]) << 48 + x |= uint64(cb.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := cb.index + 4 + if i < 0 || i > len(cb.buf) { + err = io.ErrUnexpectedEOF + return + } + cb.index = i + + x = uint64(cb.buf[i-4]) + x |= uint64(cb.buf[i-3]) << 8 + x |= uint64(cb.buf[i-2]) << 16 + x |= uint64(cb.buf[i-1]) << 24 + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
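+//
+// For example (a sketch), a varint length prefix of 3 followed by three bytes
+// yields those three bytes; alloc controls whether they are copied out of the
+// buffer or returned as a view into it:
+//
+//	b := NewBuffer([]byte{0x03, 'f', 'o', 'o'})
+//	raw, err := b.DecodeRawBytes(true) // raw == []byte("foo"), a fresh copy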
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := cb.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := cb.index + nb + if end < cb.index || end > len(cb.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + buf = cb.buf[cb.index:end] + cb.index = end + return + } + + buf = make([]byte, nb) + copy(buf, cb.buf[cb.index:]) + cb.index = end + return +} + +// ReadGroup reads the input until a "group end" tag is found +// and returns the data up to that point. Subsequent reads from +// the buffer will read data after the group end tag. If alloc +// is true, the data is copied to a new slice before being returned. +// Otherwise, the returned slice is a view into the buffer's +// underlying byte slice. +// +// This function correctly handles nested groups: if a "group start" +// tag is found, then that group's end tag will be included in the +// returned data. +func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) { + var groupEnd, dataEnd int + groupEnd, dataEnd, err := cb.findGroupEnd() + if err != nil { + return nil, err + } + var results []byte + if !alloc { + results = cb.buf[cb.index:dataEnd] + } else { + results = make([]byte, dataEnd-cb.index) + copy(results, cb.buf[cb.index:]) + } + cb.index = groupEnd + return results, nil +} + +// SkipGroup is like ReadGroup, except that it discards the +// data and just advances the buffer to point to the input +// right *after* the "group end" tag. +func (cb *Buffer) SkipGroup() error { + groupEnd, _, err := cb.findGroupEnd() + if err != nil { + return err + } + cb.index = groupEnd + return nil +} + +// SkipField attempts to skip the value of a field with the given wire +// type. When consuming a protobuf-encoded stream, it can be called immediately +// after DecodeTagAndWireType to discard the subsequent data for the field. +func (cb *Buffer) SkipField(wireType int8) error { + switch wireType { + case proto.WireFixed32: + if err := cb.Skip(4); err != nil { + return err + } + case proto.WireFixed64: + if err := cb.Skip(8); err != nil { + return err + } + case proto.WireVarint: + // skip varint by finding last byte (has high bit unset) + i := cb.index + limit := i + 10 // varint cannot be >10 bytes + for { + if i >= limit { + return ErrOverflow + } + if i >= len(cb.buf) { + return io.ErrUnexpectedEOF + } + if cb.buf[i]&0x80 == 0 { + break + } + i++ + } + // TODO: This would only overflow if buffer length was MaxInt and we + // read the last byte. This is not a real/feasible concern on 64-bit + // systems. Something to worry about for 32-bit systems? Do we care? 
+ cb.index = i + 1 + case proto.WireBytes: + l, err := cb.DecodeVarint() + if err != nil { + return err + } + if err := cb.Skip(int(l)); err != nil { + return err + } + case proto.WireStartGroup: + if err := cb.SkipGroup(); err != nil { + return err + } + default: + return ErrBadWireType + } + return nil +} + +func (cb *Buffer) findGroupEnd() (groupEnd int, dataEnd int, err error) { + start := cb.index + defer func() { + cb.index = start + }() + for { + fieldStart := cb.index + // read a field tag + _, wireType, err := cb.DecodeTagAndWireType() + if err != nil { + return 0, 0, err + } + if wireType == proto.WireEndGroup { + return cb.index, fieldStart, nil + } + // skip past the field's data + if err := cb.SkipField(wireType); err != nil { + return 0, 0, err + } + } +} diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/encode.go b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go new file mode 100644 index 00000000..524f1bcb --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go @@ -0,0 +1,147 @@ +package codec + +import ( + "github.com/golang/protobuf/proto" +) + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (cb *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + cb.buf = append(cb.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + cb.buf = append(cb.buf, uint8(x)) + return nil +} + +// EncodeTagAndWireType encodes the given field tag and wire type to the +// buffer. This combines the two values and then writes them as a varint. +func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error { + v := uint64((int64(tag) << 3) | int64(wireType)) + return cb.EncodeVarint(v) +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) EncodeFixed64(x uint64) error { + cb.buf = append(cb.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) EncodeFixed32(x uint64) error { + cb.buf = append(cb.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (cb *Buffer) EncodeRawBytes(b []byte) error { + if err := cb.EncodeVarint(uint64(len(b))); err != nil { + return err + } + cb.buf = append(cb.buf, b...) + return nil +} + +// EncodeMessage writes the given message to the buffer. +func (cb *Buffer) EncodeMessage(pm proto.Message) error { + bytes, err := marshalMessage(cb.buf, pm, cb.deterministic) + if err != nil { + return err + } + cb.buf = bytes + return nil +} + +// EncodeDelimitedMessage writes the given message to the buffer with a +// varint-encoded length prefix (the delimiter). 
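The encode side is symmetric with the decoders in decode.go. A minimal sketch that reproduces the same three bytes decoded earlier, assuming NewBuffer and a Bytes accessor from this package's buffer.go (again internal, so in-module only):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/internal/codec"
)

func main() {
	cb := codec.NewBuffer(nil)
	_ = cb.EncodeTagAndWireType(1, proto.WireVarint) // writes 0x08
	_ = cb.EncodeVarint(150)                         // writes 0x96 0x01
	fmt.Printf("% x\n", cb.Bytes())                  // 08 96 01
}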
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error { + bytes, err := marshalMessage(cb.tmp, pm, cb.deterministic) + if err != nil { + return err + } + // save truncated buffer if it was grown (so we can re-use it and + // curtail future allocations) + if cap(bytes) > cap(cb.tmp) { + cb.tmp = bytes[:0] + } + return cb.EncodeRawBytes(bytes) +} + +func marshalMessage(b []byte, pm proto.Message, deterministic bool) ([]byte, error) { + // We try to use the most efficient way to marshal to existing slice. + + if deterministic { + // see if the message has custom deterministic methods, preferring an + // "append" method over one that must always re-allocate + madm, ok := pm.(interface { + MarshalAppendDeterministic(b []byte) ([]byte, error) + }) + if ok { + return madm.MarshalAppendDeterministic(b) + } + + mdm, ok := pm.(interface { + MarshalDeterministic() ([]byte, error) + }) + if ok { + bytes, err := mdm.MarshalDeterministic() + if err != nil { + return nil, err + } + if len(b) == 0 { + return bytes, nil + } + return append(b, bytes...), nil + } + + var buf proto.Buffer + buf.SetDeterministic(true) + if err := buf.Marshal(pm); err != nil { + return nil, err + } + bytes := buf.Bytes() + if len(b) == 0 { + return bytes, nil + } + return append(b, bytes...), nil + } + + mam, ok := pm.(interface { + // see if we can append the message, vs. having to re-allocate + MarshalAppend(b []byte) ([]byte, error) + }) + if ok { + return mam.MarshalAppend(b) + } + + // lowest common denominator + bytes, err := proto.Marshal(pm) + if err != nil { + return nil, err + } + if len(b) == 0 { + return bytes, nil + } + return append(b, bytes...), nil +} diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go new file mode 100644 index 00000000..777c3a43 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go @@ -0,0 +1,127 @@ +// Package internal contains some code that should not be exported but needs to +// be shared across more than one of the protoreflect sub-packages. +package internal + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +// TODO: replace this alias configuration with desc.RegisterImportPath? + +// StdFileAliases are the standard protos included with protoc, but older versions of +// their respective packages registered them using incorrect paths. +var StdFileAliases = map[string]string{ + // Files for the github.com/golang/protobuf/ptypes package at one point were + // registered using the path where the proto files are mirrored in GOPATH, + // inside the golang/protobuf repo. + // (Fixed as of https://github.com/golang/protobuf/pull/412) + "google/protobuf/any.proto": "github.com/golang/protobuf/ptypes/any/any.proto", + "google/protobuf/duration.proto": "github.com/golang/protobuf/ptypes/duration/duration.proto", + "google/protobuf/empty.proto": "github.com/golang/protobuf/ptypes/empty/empty.proto", + "google/protobuf/struct.proto": "github.com/golang/protobuf/ptypes/struct/struct.proto", + "google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", + "google/protobuf/wrappers.proto": "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", + // Files for the google.golang.org/genproto/protobuf package at one point + // were registered with an anomalous "src/" prefix. 
+ // (Fixed as of https://github.com/google/go-genproto/pull/31) + "google/protobuf/api.proto": "src/google/protobuf/api.proto", + "google/protobuf/field_mask.proto": "src/google/protobuf/field_mask.proto", + "google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto", + "google/protobuf/type.proto": "src/google/protobuf/type.proto", + + // Other standard files (descriptor.proto and compiler/plugin.proto) are + // registered correctly, so we don't need rules for them here. +} + +func init() { + // We provide aliasing in both directions, to support files with the + // proper import path linked against older versions of the generated + // files AND files that used the aliased import path but linked against + // newer versions of the generated files (which register with the + // correct path). + + // Get all files defined above + keys := make([]string, 0, len(StdFileAliases)) + for k := range StdFileAliases { + keys = append(keys, k) + } + // And add inverse mappings + for _, k := range keys { + alias := StdFileAliases[k] + StdFileAliases[alias] = k + } +} + +type ErrNoSuchFile string + +func (e ErrNoSuchFile) Error() string { + return fmt.Sprintf("no such file: %q", string(e)) +} + +// LoadFileDescriptor loads a registered descriptor and decodes it. If the given +// name cannot be loaded but is a known standard name, an alias will be tried, +// so the standard files can be loaded even if linked against older "known bad" +// versions of packages. +func LoadFileDescriptor(file string) (*descriptorpb.FileDescriptorProto, error) { + fdb := proto.FileDescriptor(file) + aliased := false + if fdb == nil { + var ok bool + alias, ok := StdFileAliases[file] + if ok { + aliased = true + if fdb = proto.FileDescriptor(alias); fdb == nil { + return nil, ErrNoSuchFile(file) + } + } else { + return nil, ErrNoSuchFile(file) + } + } + + fd, err := DecodeFileDescriptor(file, fdb) + if err != nil { + return nil, err + } + + if aliased { + // the file descriptor will have the alias used to load it, but + // we need it to have the specified name in order to link it + fd.Name = proto.String(file) + } + + return fd, nil +} + +// DecodeFileDescriptor decodes the bytes of a registered file descriptor. +// Registered file descriptors are first "proto encoded" (e.g. binary format +// for the descriptor protos) and then gzipped. So this function gunzips and +// then unmarshals into a descriptor proto. 
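LoadFileDescriptor leans on the golang/protobuf registry: every linked-in .proto file registers a gzipped, proto-encoded FileDescriptorProto under its import path. A sketch of the same lookup, gunzip, and unmarshal round trip using only public APIs (the blank import is just to trigger registration of duration.proto):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/golang/protobuf/proto"
	_ "github.com/golang/protobuf/ptypes/duration" // registers google/protobuf/duration.proto
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	gz := proto.FileDescriptor("google/protobuf/duration.proto")
	if gz == nil {
		panic("descriptor not registered")
	}
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		panic(err)
	}
	raw, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	var fd descriptorpb.FileDescriptorProto
	if err := proto.Unmarshal(raw, &fd); err != nil {
		panic(err)
	}
	fmt.Println(fd.GetName(), fd.GetPackage()) // google/protobuf/duration.proto google.protobuf
}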
+func DecodeFileDescriptor(element string, fdb []byte) (*descriptorpb.FileDescriptorProto, error) { + raw, err := decompress(fdb) + if err != nil { + return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err) + } + fd := descriptorpb.FileDescriptorProto{} + if err := proto.Unmarshal(raw, &fd); err != nil { + return nil, fmt.Errorf("bad descriptor for %q: %v", element, err) + } + return &fd, nil +} + +func decompress(b []byte) ([]byte, error) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + out, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + return out, nil +} diff --git a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go new file mode 100644 index 00000000..25376c7b --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go @@ -0,0 +1,20 @@ +package internal + +import ( + "github.com/golang/protobuf/proto" +) + +// GetUnrecognized fetches the bytes of unrecognized fields for the given message. +func GetUnrecognized(msg proto.Message) []byte { + return proto.MessageReflect(msg).GetUnknown() +} + +// SetUnrecognized adds the given bytes to the unrecognized fields for the given message. +func SetUnrecognized(msg proto.Message, data []byte) { + refl := proto.MessageReflect(msg) + existing := refl.GetUnknown() + if len(existing) > 0 { + data = append(existing, data...) + } + refl.SetUnknown(data) +} diff --git a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go index 8cdd7d48..5d62bac9 100644 --- a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go +++ b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go @@ -16,7 +16,6 @@ package strutil import ( "fmt" "net/url" - "strings" "github.com/grafana/regexp" ) @@ -39,26 +38,6 @@ func GraphLinkForExpression(expr string) string { // SanitizeLabelName replaces anything that doesn't match // client_label.LabelNameRE with an underscore. -// Note: this does not handle all Prometheus label name restrictions (such as -// not starting with a digit 0-9), and hence should only be used if the label -// name is prefixed with a known valid string. func SanitizeLabelName(name string) string { return invalidLabelCharRE.ReplaceAllString(name, "_") } - -// SanitizeFullLabelName replaces any invalid character with an underscore, and -// if given an empty string, returns a string containing a single underscore. -func SanitizeFullLabelName(name string) string { - if len(name) == 0 { - return "_" - } - var validSb strings.Builder - for i, b := range name { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - validSb.WriteRune('_') - } else { - validSb.WriteRune(b) - } - } - return validSb.String() -} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000..dc5225b6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. 
+package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and +// len(buckets) * size. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket.
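Float above is the stock Observable, but any type with these four methods can be bucketed. A minimal same-package sketch of a hypothetical integer-valued Observable, mirroring Float:

// Count attaches the methods of Observable to an int64.
type Count int64

// NewCount returns a Count, for use as a provider function.
func NewCount() Observable {
	c := Count(0)
	return &c
}

func (c *Count) Multiply(ratio float64) { *c = Count(float64(*c) * ratio) }

func (c *Count) Add(other Observable) { *c += *other.(*Count) }

func (c *Count) Clear() { *c = 0 }

func (c *Count) CopyFrom(other Observable) { *c = *other.(*Count) }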
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
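The bucket lookup in mergeValue (and the backward walk in Latest) is plain ring arithmetic against the level's end time. A small same-package sketch of that mapping with worked numbers (bucketIndexFor is a hypothetical helper, not part of the original file):

// bucketIndexFor returns the logical bucket index (0 = oldest,
// numBuckets-1 = newest) holding time t at a level, as in mergeValue.
// Example: with numBuckets = 64, size = 10s, end = 12:00:00 and
// t = 11:59:35, end.Sub(t)/size == 2, so the index is 63 - 2 = 61,
// i.e. two buckets before the newest.
func bucketIndexFor(end, t time.Time, size time.Duration, numBuckets int) int {
	return (numBuckets - 1) - int(end.Sub(t)/size)
}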
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as shown below. + // interval = (finish - start) / num + // i'th value = sum of observations in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
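Putting the pieces together, here is typical usage of the exported wrapper defined next. This is a sketch only: the package lives under internal/, so it is importable solely from within the golang.org/x/net module:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

func main() {
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	f := timeseries.Float(3)
	ts.Add(&f) // bucketed at the default clock's current time

	// Approximate sum of observations over the last minute.
	recent := ts.Recent(time.Minute).(*timeseries.Float).Value()
	fmt.Println(recent) // 3
}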
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000..c646a695 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
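For contrast with the bookkeeping above, the exported surface is just NewEventLog plus the three EventLog methods. A sketch of how a long-lived object typically feeds /debug/events:

package main

import (
	"net"
	"time"

	"golang.org/x/net/trace"
)

func watchConn(addr string) {
	el := trace.NewEventLog("mypkg.Conn", addr)
	defer el.Finish()

	start := time.Now()
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		el.Errorf("dial %s: %v", addr, err) // marks this log as having a recent error
		return
	}
	defer conn.Close()
	el.Printf("connected in %v", time.Since(start))
}

func main() { watchConn("localhost:9000") }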
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns an event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+ {{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000..d6c71101 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // running total of squared measurements + buckets []int64 // bucketed values for histogram + value int // index of the single bucket used so far (optimization) + valueCount int64 // number of values recorded for single value +} + +// addMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value.
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
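To make the power-of-two bucketing above concrete: log2 returns the 1-based position of the highest set bit, so getBucket(150) is 7 and 150 is counted in [128, 256). A same-package sketch (bucketRangeOf is a hypothetical helper, not part of the original file):

// bucketRangeOf returns the half-open interval [lo, hi) covered by the
// bucket that addMeasurement files v under.
func bucketRangeOf(v int64) (lo, hi int64) {
	b := getBucket(v)
	lo = bucketBoundary(uint8(b))
	if b == bucketCount-1 {
		return lo, math.MaxInt64 // last bucket is open-ended
	}
	return lo, bucketBoundary(uint8(b + 1))
}

// bucketRangeOf(0) == (0, 2); bucketRangeOf(150) == (128, 256)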
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000..eae2a99f --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Since(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+	if len(tr.events) == 0 && m > 3 {
+		tr.maxEvents = m
+	}
+	tr.mu.Unlock()
+}
+
+func (tr *trace) ref() {
+	atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+	if atomic.AddInt32(&tr.refs, -1) == 0 {
+		tr.mu.RLock()
+		if tr.recycler != nil {
+			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
+			go func(f func(interface{}), es []event) {
+				for _, e := range es {
+					if e.Recyclable {
+						f(e.What)
+					}
+				}
+			}(tr.recycler, tr.events)
+		}
+		tr.mu.RUnlock()
+
+		freeTrace(tr)
+	}
+}
+
+func (tr *trace) When() string {
+	return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+	tr.mu.RLock()
+	t := tr.Elapsed
+	tr.mu.RUnlock()
+
+	if t == 0 {
+		// Active trace.
+		t = time.Since(tr.Start)
+	}
+	return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+	tr.mu.RLock()
+	defer tr.mu.RUnlock()
+	return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+	select {
+	case tr := <-traceFreeList:
+		return tr
+	default:
+		return new(trace)
+	}
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+	if DebugUseAfterFinish {
+		return // never reuse
+	}
+	tr.reset()
+	select {
+	case traceFreeList <- tr:
+	default:
+	}
+}
+
+func elapsed(d time.Duration) string {
+	b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+	// For subsecond durations, blank all zeros before decimal point,
+	// and all zeros between the decimal point and the first non-zero digit.
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+
+	return string(b)
+}
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+	pageTmplOnce.Do(func() {
+		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+			"elapsed": elapsed,
+			"add":     func(a, b int) int { return a + b },
+		}).Parse(pageHTML))
+	})
+	return pageTmplCache
+}
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+	<head>
+	<title>/debug/requests</title>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#tr-status td.family {
+			padding-right: 2em;
+		}
+		table#tr-status td.active {
+			padding-right: 1em;
+		}
+		table#tr-status td.latency-first {
+			padding-left: 1em;
+		}
+		table#tr-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			{{if $.Expanded}}font-weight: bold;{{end}}
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+	</style>
+	</head>
+	<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+	{{range $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{$n := index $.ActiveTraceCount $fam}}
+		<td class="active">
+			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} active]
+			{{if $n}}</a>{{end}}
+		</td>
+
+		{{$f := index $.CompletedTraces $fam}}
+		{{range $i, $b := $f.Buckets}}
+		{{$empty := $b.Empty}}
+		<td {{if $empty}}class="empty"{{end}}>{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}[{{.Cond}}]{{if not $empty}}</a>{{end}}</td>
+		{{end}}
+
+		{{$nb := len $f.Buckets}}
+		<td class="latency-first"><a href="?fam={{$fam}}&b={{$nb}}">[minute]</a></td>
+		<td><a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a></td>
+		<td><a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a></td>
+	</tr>
+	{{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+  [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+  [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+	{{if or $.Expanded (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+	{{else}}
+	[Traced/Summary]
+	{{end}}
+	{{if or (not $.Expanded) (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+	{{else}}
+	[Traced/Expanded]
+	{{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+	<caption>
+		{{if $.Active}}Active{{else}}Completed{{end}} Requests
+	</caption>
+	<tr><th>When</th><th>Elapsed (s)</th></tr>
+	{{range $tr := $.Traces}}
+	<tr class="first">
+		<td class="when">{{$tr.When}}</td>
+		<td class="elapsed">{{$tr.ElapsedTime}}</td>
+		<td>{{$tr.Title}}</td>
+	</tr>
+	{{/* TODO: include traceID/spanID */}}
+	{{if $.Expanded}}
+	{{range $tr.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+	</body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`
diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/sync/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS
new file mode 100644
index 00000000..73309904
--- /dev/null
+++ b/vendor/golang.org/x/sync/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
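The vendored file added next, golang.org/x/sync/semaphore, implements the weighted semaphore used to bound concurrency. As a rough illustration of its contract — a sketch that is not part of the vendored code, with item counts and messages chosen only for the example — a bounded worker pool can hold at most the semaphore's total weight at once, and acquiring the full weight doubles as a barrier:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	// At most 3 units of weight may be held at any moment.
	sem := semaphore.NewWeighted(3)

	for i := 0; i < 10; i++ {
		// Blocks until weight 1 is free or ctx is done.
		if err := sem.Acquire(ctx, 1); err != nil {
			break
		}
		go func(i int) {
			defer sem.Release(1) // give the weight back when done
			fmt.Println("processing item", i)
		}(i)
	}

	// Acquiring the full weight waits for every worker to release —
	// the same writer-vs-readers pattern described in notifyWaiters below.
	if err := sem.Acquire(ctx, 3); err == nil {
		fmt.Println("all workers finished")
	}
}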
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000..30f632c5 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. 
The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 00000000..f34a38e4 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,201 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. 
Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func 
file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 00000000..e491a9e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 00000000..9d4213eb --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 00000000..52338d00 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,60 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. 
+ +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often times receive PRs that are trying to fix several things at + a time, but only one fix is considered acceptable, nothing gets merged and + both author's & review's time is wasted. Create more PRs to address different + concerns and everyone will be happy. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. If your contribution introduces new dependencies which are NOT + in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a + discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line + to address an issue. PRs with irrelevant changes won't be merged. If you do + want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 00000000..d6ff2674 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
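Applied to a Go source file (the file format used throughout this vendor tree), the boilerplate above might look like the following sketch; the bracketed fields are left as the appendix's placeholders and the package clause is illustrative:

```go
// Copyright [yyyy] [name of copyright owner]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package placeholder is a hypothetical package used only to show placement.
package placeholder
```

The vendored gRPC sources that follow carry this same notice, wrapped in a `/* ... */` block instead of line comments.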
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 00000000..c6672c0a --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,28 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) + +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 00000000..1f896092 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,46 @@ +all: vet test testrace + +build: + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + proto \ + test \ + testrace \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 00000000..53019774 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 00000000..0e6ae69a --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,141 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. + +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" +``` + +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. + +Otherwise, to install the `grpc-go` package, run the following command: + +```console +$ go get -u google.golang.org/grpc +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. + +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + + ```sh + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ```sh + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +#### If you are using Go modules: + +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +```go +module <your module name> + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:.
*.proto + ``` + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 00000000..be6e1087 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 00000000..02f5dc53 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one.
If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} +} + +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { + if a == nil { + return New(key, value) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + for k, v := range a.m { + n.m[k] = v + } + n.m[key] = value + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. The returned value should not be modified. +func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } + return a.m[key] +} + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fall back to a standard equality check if the value does not implement Equal. + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000..29475e31 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
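The deprecation notices here point to ConnectParams, which is defined just below. A minimal sketch of the recommended replacement, assuming a hypothetical target address; grpc.WithConnectParams, backoff.DefaultConfig, and insecure.NewCredentials are the APIs vendored in this change:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Configure connection backoff through ConnectParams instead of the
	// deprecated BackoffConfig/DefaultBackoffConfig.
	conn, err := grpc.Dial("localhost:50051", // hypothetical address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           backoff.DefaultConfig, // 1s base delay, 1.6x multiplier, 120s cap
			MinConnectTimeout: 20 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```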
+var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 00000000..0787d0b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specified +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000..392b21fb --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,396 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the balancer builder registered with the given name. +// Note that the comparison is done in a case-insensitive fashion. +// If no builder is registered with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// A SubConn represents a single connection to a gRPC backend service. +// +// Each SubConn contains a list of addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. +// +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if the currently-connected address is still in the new list. + // If it's in the list, the connection will be kept.
+ // If it's not in the list, the connection will be gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shut down. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server.
Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + CredsBundle credentials.Bundle + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v3.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. 
+ // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. 
+ UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 00000000..3929c26d --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,254 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + state: connectivity.Connecting, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns *resolver.AddressMap + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.ServiceConfig? + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs; it's used for quick lookup of an address. + addrsSet := resolver.NewAddressMap() + for _, a := range s.ResolverState.Addresses { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns.Set(a, sc) + b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + } + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) + // a was removed by resolver. + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as a resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information.
+ if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. 
+func (b *baseBalancer) Close() { +} + +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 00000000..e31d76e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. 
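To make the shape of a base-package balancer concrete before the constructor itself, here is a minimal sketch; the first_ready name and its trivial picking policy are invented for illustration, while the roundrobin balancer later in this diff follows the same pattern with a real policy:

```go
// Package firstready is a hypothetical example of building a balancer on
// top of the base package.
package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is a made-up balancer name used only for this sketch.
const Name = "first_ready"

func init() {
	// Register a builder whose pickers come from firstReadyPickerBuilder;
	// per the Register docs, this must happen at init time.
	balancer.Register(base.NewBalancerBuilder(Name, &firstReadyPickerBuilder{}, base.Config{HealthCheck: true}))
}

type firstReadyPickerBuilder struct{}

func (*firstReadyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	// With no READY SubConns, return a picker that makes RPCs block until
	// a new picker is produced.
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	// Map iteration order is unspecified; this simply takes one ready SubConn.
	var picked balancer.SubConn
	for sc := range info.ReadySCs {
		picked = sc
		break
	}
	return &firstReadyPicker{sc: picked}
}

type firstReadyPicker struct {
	sc balancer.SubConn
}

// Pick always returns the single SubConn chosen at build time.
func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}
```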
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 00000000..c3341358 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 00000000..4ecfa1c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValue(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 00000000..f7031ad2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC; users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +var logger = grpclog.Component("roundrobin") + +// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: Build called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: uint32(grpcrand.Intn(len(scs))), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Pick() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + next uint32 +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 00000000..0359956d --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,481 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" +) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner.
+type ccBalancerWrapper struct { + cc *ClientConn + + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string + + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. +} + +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + } + go ccb.watcher() + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + return ccb +} + +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case u := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { + break + } + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) + case *scStateUpdate: + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) + } + case <-ccb.closed.Done(): + } + + if ccb.closed.HasFired() { + ccb.handleClose() + return + } + } +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. 
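+//
+// Conceptually the flow is (simplified sketch; see watcher() above for the
+// real implementation):
+//
+//	updateCh.Put(update)    // picked up asynchronously by watcher()
+//	res := <-resultCh.Get() // watcher() reports the balancer's result here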
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
+
+	var res interface{}
+	select {
+	case res = <-ccb.resultCh.Get():
+		ccb.resultCh.Load()
+	case <-ccb.closed.Done():
+		// Return early if the balancer wrapper is closed while we are waiting for
+		// the underlying balancer to process a ClientConnState update.
+		return nil
+	}
+	// If the returned error is nil, attempting to type assert to error leads to
+	// panic. So, this needs to be handled separately.
+	if res == nil {
+		return nil
+	}
+	return res.(error)
+}
+
+// handleClientConnStateChange handles a ClientConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+//
+// If the addresses specified in the update contain addresses of type "grpclb"
+// and the selected LB policy is not "grpclb", these addresses will be filtered
+// out and ccs will be modified with the updated address list.
+func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
+	if ccb.curBalancerName != grpclbName {
+		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		var addrs []resolver.Address
+		for _, addr := range ccs.ResolverState.Addresses {
+			if addr.Type == resolver.GRPCLB {
+				continue
+			}
+			addrs = append(addrs, addr)
+		}
+		ccs.ResolverState.Addresses = addrs
+	}
+	ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
+}
+
+// updateSubConnState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
+	// When updating addresses for a SubConn, if the address in use is not in
+	// the new addresses, the old ac will be torn down via tearDown() and a new
+	// ac will be created. tearDown() generates a state change with Shutdown
+	// state, and we don't want the balancer to receive this state change. So
+	// before tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil,
+	// and this function will be called with (nil, Shutdown). We don't need to
+	// call the balancer method in this case.
+	if sc == nil {
+		return
+	}
+	ccb.updateCh.Put(&scStateUpdate{
+		sc:    sc,
+		state: s,
+		err:   err,
+	})
+}
+
+// handleSubConnStateChange handles a SubConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
+	ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.updateCh.Put(&exitIdleUpdate{})
+}
+
+func (ccb *ccBalancerWrapper) handleExitIdle() {
+	if ccb.cc.GetState() != connectivity.Idle {
+		return
+	}
+	ccb.balancer.ExitIdle()
+}
+
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	ccb.updateCh.Put(&resolverErrorUpdate{err: err})
+}
+
+func (ccb *ccBalancerWrapper) handleResolverError(err error) {
+	ccb.balancer.ResolverError(err)
+}
+
+// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
+// LB policy identified by name.
+//
+// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
+// first good update from the name resolver, it determines the LB policy to use
+// and invokes the switchTo() method. Upon receipt of every subsequent update
+// from the name resolver, it invokes this method.
+//
+// The ccBalancerWrapper keeps track of the current LB policy name, and skips
+// the graceful balancer switching process if the name does not change.
+func (ccb *ccBalancerWrapper) switchTo(name string) {
+	ccb.updateCh.Put(&switchToUpdate{name: name})
+}
+
+// handleSwitchTo handles a balancer switch update from the update channel. It
+// calls the SwitchTo() method on the gracefulswitch.Balancer with a
+// balancer.Builder corresponding to name. If no balancer.Builder is registered
+// for the given name, it uses the default LB policy, which is "pick_first".
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
+	// TODO: Other languages use case-insensitive balancer registries. We should
+	// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+	if strings.EqualFold(ccb.curBalancerName, name) {
+		return
+	}
+
+	// TODO: Ensure that name is a registered LB policy when we get here.
+	// We currently only validate the `loadBalancingConfig` field. We need to do
+	// the same for the `loadBalancingPolicy` field and reject the service config
+	// if the specified policy is not registered.
+	builder := balancer.Get(name)
+	if builder == nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+		builder = newPickfirstBuilder()
+	} else {
+		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+	}
+
+	if err := ccb.balancer.SwitchTo(builder); err != nil {
+		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+		return
+	}
+	ccb.curBalancerName = builder.Name()
+}
+
+// handleRemoveSubConn handles a request from the underlying balancer to remove
+// a subConn.
+//
+// See comments in RemoveSubConn() for more details.
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
+	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
+
+func (ccb *ccBalancerWrapper) close() {
+	ccb.closed.Fire()
+	<-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) handleClose() {
+	ccb.balancer.Close()
+	ccb.done.Fire()
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if len(addrs) <= 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ac, err := ccb.cc.newAddrConn(addrs, opts)
+	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
+	acbw.ac.mu.Lock()
+	ac.acbw = acbw
+	acbw.ac.mu.Unlock()
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
+	// was required to handle the RemoveSubConn() method asynchronously by pushing
+	// the update onto the update channel. This was done to avoid a deadlock as
+	// switchBalancer() was holding cc.mu when calling Close() on the old
+	// balancer, which would in turn call RemoveSubConn().
+	//
+	// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
+	// asynchronously is probably not required anymore since the switchTo() method
+	// handles the balancer switch by pushing the update onto the channel.
+	// TODO(easwars): Handle this inline.
+ acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + newAC, err := cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() + if acState != connectivity.Idle { + go newAC.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) 
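+	// A unary RPC is modeled as a stream with exactly one SendMsg and one
+	// RecvMsg; the same pattern appears in invoke() in call.go below.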
+ if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 00000000..64a232f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1188 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. 
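+// As a rough illustration (not normative), a successful unary call is
+// typically logged on the client side as the sequence:
+//
+//	CLIENT_HEADER, CLIENT_MESSAGE, CLIENT_HALF_CLOSE,
+//	SERVER_HEADER, SERVER_MESSAGE, SERVER_TRAILER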
+type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. +var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. 
+var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. 
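+	// For example, a reader that sees sequence ids 1, 2 and 4 for the same
+	// call_id can infer that entry 3 was lost.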
+ SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
+	}
+	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
+	}
+	return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+	isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service>/<method>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identity.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port>.
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. 
If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 00000000..9e20e4d3 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 00000000..32b7fa57 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. 
+// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 00000000..422639c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1655 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. 
+	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
+	// errTransportCredsAndBundle indicates that creds bundle is used together
+	// with other individual Transport Credentials.
+	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
+	// errNoTransportCredsInBundle indicates that the configured creds bundle
+	// returned nil transport credentials.
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+	// errTransportCredentialsMissing indicates that users want to transmit
+	// security information (e.g., OAuth2 token) which requires a secure
+	// connection on an insecure connection.
+	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
+)
+
+const (
+	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
+	defaultClientMaxSendMessageSize    = math.MaxInt32
+	// http2IOBufSize specifies the buffer size for sending frames.
+	defaultWriteBufSize = 32 * 1024
+	defaultReadBufSize  = 32 * 1024
+)
+
+// Dial creates a client connection to the given target.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	return DialContext(context.Background(), target, opts...)
+}
+
+type defaultConfigSelector struct {
+	sc *ServiceConfig
+}
+
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+	return &iresolver.RPCConfig{
+		Context:      rpcInfo.Context,
+		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+	}, nil
+}
+
+// DialContext creates a client connection to the given target. By default,
+// it's a non-blocking dial (the function won't wait for connections to be
+// established, and connecting happens in the background). To make it a
+// blocking dial, use the WithBlock() dial option.
+//
+// In the non-blocking case, the ctx does not act against the connection. It
+// only controls the setup steps.
+//
+// In the blocking case, ctx can be used to cancel or expire the pending
+// connection. Once this function returns, the cancellation and expiration of
+// ctx will be a no-op. Users should call ClientConn.Close to terminate all
+// the pending operations after this function returns.
+//
+// The target name syntax is defined in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// e.g. to use the dns resolver, a "dns:///" prefix should be applied to the
+// target.
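+//
+// For example, a blocking dial bounded by ctx might look like this
+// (illustrative sketch, not part of the upstream file; the host and timeout
+// are placeholders):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	conn, err := grpc.DialContext(ctx, "dns:///greeter.example.com:443",
+//		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
+//		grpc.WithBlock(), // wait for READY; ctx bounds the whole setup
+//	)
+//	if err != nil {
+//		// handle dial error
+//	}
+//	defer conn.Close()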
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID + + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. 
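+	// The scheme parsed from the dial target selects among the registered
+	// resolver.Builder implementations; a target with no scheme, or with an
+	// unregistered one, falls back to the default scheme (see
+	// parseTargetAndFindResolver below).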
+ resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err + } + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + cc.Connect() + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. 
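+// The chaining order matches registration order: e.g. with
+// grpc.WithChainStreamInterceptor(a, b, c) (illustrative), a runs first and
+// wraps b, which wraps c, which wraps the underlying Streamer.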
+func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID *channelz.Identifier +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. 
It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. + + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. + firstResolveEvent *grpcsync.Event + + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.balancerWrapper.exitIdle() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. 
+ cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. + if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + cc.balancerWrapper.resolverError(err) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. 
+ cc.applyFailingLB(s.ServiceConfig) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + bw := cc.balancerWrapper + cc.mu.Unlock() + + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret +} + +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. +// +// Caller must hold cc.mu. +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) + } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.balancerWrapper.updateSubConnState(sc, s, err) +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { + return nil, ErrClientConnClosing + } + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err + } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + + cc.conns[ac] = struct{}{} + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + ac.resetTransport() + return nil +} + +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if equalAddresses(ac.addrs, addrs) { + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. 
The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + return getMethodConfig(cc.sc, method) +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.balancerWrapper.switchTo(newBalancerName) +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. 
Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.mu.Unlock() + + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. + cc.blockingpicker.close() + if bWrapper != nil { + bWrapper.close() + } + if rWrapper != nil { + rWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID *channelz.Identifier + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + ac.state = s + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. 
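+//
+// Worked example (illustrative): with a KeepaliveParams.Time of 30s, a GoAway
+// with reason "too_many_pings" causes subsequent transports on this channel
+// to use a keepalive interval of at least 60s.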
+func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
+	switch r {
+	case transport.GoAwayTooManyPings:
+		v := 2 * ac.dopts.copts.KeepaliveParams.Time
+		ac.cc.mu.Lock()
+		if v > ac.cc.mkp.Time {
+			ac.cc.mkp.Time = v
+		}
+		ac.cc.mu.Unlock()
+	}
+}
+
+func (ac *addrConn) resetTransport() {
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		return
+	}
+
+	addrs := ac.addrs
+	backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+	// This will be the duration that dial gets to finish.
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return
+		}
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-ac.ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if ac.state != connectivity.Shutdown {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
+}
+
+// tryAllAddrs tries to create a connection to each of the addresses, stopping
+// at the first successful one. It returns an error if no address was
+// successfully connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
+	var firstConnErr error
+	for _, addr := range addrs {
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return errConnClosing
+		}
+
+		ac.cc.mu.RLock()
+		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+		ac.cc.mu.RUnlock()
+
+		copts := ac.dopts.copts
+		if ac.scopts.CredsBundle != nil {
+			copts.CredsBundle = ac.scopts.CredsBundle
+		}
+		ac.mu.Unlock()
+
+		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
+
+		err := ac.createTransport(addr, copts, connectDeadline)
+		if err == nil {
+			return nil
+		}
+		if firstConnErr == nil {
+			firstConnErr = err
+		}
+		ac.cc.updateConnectionError(err)
+	}
+
+	// Couldn't connect to any address.
+	return firstConnErr
+}
+
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ac.ctx) + + onClose := grpcsync.OnceFunc(func() { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + }) + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + onClose() + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + copts.ChannelzParentID = ac.channelzID + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err + } + + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + go newTr.Close(transport.ErrConnClosing) + return nil + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil + } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. 
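+//
+// For example, a client typically satisfies requirements 2-4 like this
+// (illustrative sketch, not part of the upstream file):
+//
+//	import _ "google.golang.org/grpc/health" // registers the health check function (2)
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		// round_robin opts subchannels into health checking (4); a non-empty
+//		// healthCheckConfig satisfies (3).
+//		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}],"healthCheckConfig":{"serviceName":""}}`),
+//	)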
+func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport + } + return nil +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. 
+ ac.mu.Unlock() + curTr.GracefulClose() + ac.mu.Lock() + } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) + ac.mu.Unlock() +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. 
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + return rb, nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return nil, err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. + endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil +} + +// Determine channel authority. 
The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil + } +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 00000000..12977654 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. 
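+//
+// Illustrative sketch (editorial, not part of the upstream source): any type
+// with this Marshal/Unmarshal pair satisfies baseCodec. For instance, a
+// hypothetical JSON codec implementing the newer encoding.Codec interface:
+//
+//	type jsonCodec struct{}
+//
+//	func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
+//	func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
+//	func (jsonCodec) Name() string                               { return "json" }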
+type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 00000000..4cdc6ba7 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 00000000..0b206a57 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 00000000..11b10618 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. 
+ DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. 
We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by the underlying + // system have been broken. If you see one of these errors, + // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. + Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op.
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 00000000..4a899264 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. 
+ ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 00000000..5feac3aa --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. 
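+//
+// Illustrative note (editorial, not part of the upstream source): because the
+// levels above are ordered iota constants, "at least as strong as" checks are
+// plain comparisons, which is what CheckSecurityLevel (further below) relies on:
+//
+//	PrivacyAndIntegrity >= IntegrityOnly // true
+//	NoSecurity >= IntegrityOnly          // false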
+func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. 
It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). + TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. + PerRPCCredentials() PerRPCCredentials + + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. 
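+//
+// Illustrative sketch (editorial, not part of the upstream source): a
+// hypothetical token-based PerRPCCredentials implementation can consult the
+// RequestInfo attached to ctx (see RequestInfoFromContext above) while
+// building per-request metadata:
+//
+//	type tokenCreds struct{ token string }
+//
+//	func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+//		if ri, ok := credentials.RequestInfoFromContext(ctx); ok {
+//			log.Printf("attaching token for %s", ri.Method)
+//		}
+//		return map[string]string{"authorization": "Bearer " + c.token}, nil
+//	}
+//
+//	func (c tokenCreds) RequireTransportSecurity() bool { return true }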
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) + return chi +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() CommonAuthInfo + } + if ai == nil { + return errors.New("AuthInfo is nil") + } + if ci, ok := ai.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 00000000..82bee144 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. 
+// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per-RPC credentials. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns a nil implementation, as the insecure +// bundle does not support per-RPC credentials. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 00000000..ce2bbc10 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,236 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "net/url" + + credinternal "google.golang.org/grpc/internal/credentials" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface.
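+//
+// Illustrative note (editorial, not part of the upstream source): a server
+// handler can recover this struct through the peer package, assuming the
+// connection was established with TLS credentials:
+//
+//	if p, ok := peer.FromContext(ctx); ok {
+//		if ti, ok := p.AuthInfo.(credentials.TLSInfo); ok {
+//			log.Printf("cipher: 0x%x, server name: %s", ti.State.CipherSuite, ti.State.ServerName)
+//		}
+//	}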
+type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. 
If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
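+//
+// Illustrative sketch (editorial, not part of the upstream source): typical
+// use of the constructors above, with hypothetical file names:
+//
+//	// Client side, verifying the server against a custom CA.
+//	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
+//	if err != nil { /* handle error */ }
+//	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
+//
+//	// Server side, presenting a certificate and key.
+//	srvCreds, err := credentials.NewServerTLSFromFile("server.pem", "server.key")
+//	srv := grpc.NewServer(grpc.Creds(srvCreds))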
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 00000000..9372dc32 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,620 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + extraDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger +} + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +var extraDialOptions []DialOption + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on the underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of the read buffer; this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable the read buffer for +// a connection so the data framer can access the underlying conn directly.
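+//
+// Illustrative note (editorial, not part of the upstream source): the two
+// buffer options are typically tuned together, e.g. for large messages:
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithWriteBufferSize(64*1024), // batch up to 64KB before flushing to the conn
+//		grpc.WithReadBufferSize(64*1024),  // read up to 64KB per syscall
+//	)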
+func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. 
+// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes callers of Dial block until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = insecure.NewCredentials() + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
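+//
+// Illustrative sketch (editorial, not part of the upstream source): overriding
+// only part of the backoff configuration through WithConnectParams (above), as
+// its documentation suggests:
+//
+//	bc := backoff.DefaultConfig
+//	bc.MaxDelay = 30 * time.Second
+//	conn, err := grpc.Dial(target, grpc.WithConnectParams(grpc.ConnectParams{
+//		Backoff:           bc,
+//		MinConnectTimeout: 10 * time.Second,
+//	}))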
+func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. 
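+//
+// Illustrative sketch (editorial, not part of the upstream source): a custom
+// dialer (WithContextDialer, above) routing all connections through an
+// in-memory listener, as is common in tests with the bufconn package:
+//
+//	lis := bufconn.Listen(1024 * 1024)
+//	conn, err := grpc.Dial("bufnet",
+//		grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
+//			return lis.DialContext(ctx)
+//		}),
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//	)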
+func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header and as the server name in authentication handshake. 
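// Usage sketch (not part of this patch; the address is a placeholder): a
// minimal logging interceptor installed with WithChainUnaryInterceptor,
// together with client keepalive parameters.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

// logUnary logs each unary RPC and, per the interceptor contract, calls
// invoker to complete the RPC.
func logUnary(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(logUnary),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second, // ping after 30s without activity
			Timeout: 10 * time.Second, // wait up to 10s for the ping ack
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}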
+func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id *channelz.Identifier) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used, or +// +// 2. The name resolver does not provide a service config or provides an +// invalid service config. +// +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. 
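// Usage sketch (not part of this patch; the target is a placeholder):
// supplying a default service config as JSON. It takes effect when the
// resolver returns no (or an invalid) service config, or when
// WithDisableServiceConfig is set.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Spread RPCs across all resolved addresses instead of pick-first.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}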
+func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + healthCheckFunc: internal.HealthCheckFunc, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, + UseProxy: true, + }, + } +} + +// withGetMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purpose only. +func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 00000000..0022859a --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 00000000..711763d5 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. 
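// Usage sketch (not part of this patch; scheme and addresses are
// placeholders): WithResolvers registers a resolver for this Dial only, here
// the manual resolver that ships with gRPC.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:50051"}, {Addr: "10.0.0.2:50051"}},
	})

	conn, err := grpc.Dial(r.Scheme()+":///service",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r), // matched before the global registry
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}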
+package encoding + +import ( + "io" + "strings" + + "google.golang.org/grpc/internal/grpcutil" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string + // If a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. + // + // Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. 
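// Usage sketch (not part of this patch): a Compressor backed by
// compress/gzip, registered in an init() function as required above. gRPC
// already ships an equivalent in its encoding/gzip package; the name
// "gzip-example" is chosen here to avoid clashing with it. Codecs are
// registered analogously via RegisterCodec.

package gzipexample

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

type compressor struct{}

// Compress wraps w so that data written to the returned WriteCloser is
// gzip-compressed into w.
func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

// Decompress wraps r so that reads yield the decompressed payload.
func (compressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

// Name is the content-coding sent in the grpc-encoding header.
func (compressor) Name() string { return "gzip-example" }

func init() {
	encoding.RegisterCompressor(compressor{})
}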
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodec(codec Codec) {
+	if codec == nil {
+		panic("cannot register a nil Codec")
+	}
+	if codec.Name() == "" {
+		panic("cannot register Codec with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodec(contentSubtype string) Codec {
+	return registeredCodecs[contentSubtype]
+}
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
new file mode 100644
index 00000000..3009b35a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proto defines the protobuf codec. Importing this package will
+// register the codec.
+package proto
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the proto codec.
+const Name = "proto"
+
+func init() {
+	encoding.RegisterCodec(codec{})
+}
+
+// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type codec struct{}
+
+func (codec) Marshal(v interface{}) ([]byte, error) {
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+	}
+	return proto.Marshal(vv)
+}
+
+func (codec) Unmarshal(data []byte, v interface{}) error {
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
+	}
+	return proto.Unmarshal(data, vv)
+}
+
+func (codec) Name() string {
+	return Name
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
new file mode 100644
index 00000000..8358dd6e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000..c8bb2be3 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import ( + "os" + + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return grpclog.Logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + grpclog.Logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + grpclog.Logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + grpclog.Logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + grpclog.Logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + grpclog.Logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + grpclog.Logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + grpclog.Logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + grpclog.Logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + grpclog.Logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + grpclog.Logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + grpclog.Logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. +func Fatalln(args ...interface{}) { + grpclog.Logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...interface{}) { + grpclog.Logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + grpclog.Logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...interface{}) { + grpclog.Logger.Infoln(args...) 
+} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 00000000..ef06a482 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import "google.golang.org/grpc/internal/grpclog" + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + grpclog.Logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 00000000..b5560b47 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,259 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclog + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strconv" + "strings" + + "google.golang.org/grpc/internal/grpclog" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. 
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. 
+ b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...interface{}) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...interface{}) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...interface{}) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000..a66024d2 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,313 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method. +) + +// Enum value maps for HealthCheckResponse_ServingStatus. +var ( + HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", + } + HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, + } +) + +func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus { + p := new(HealthCheckResponse_ServingStatus) + *p = x + return p +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor() +} + +func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType { + return &file_grpc_health_v1_health_proto_enumTypes[0] +} + +func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead. 
+func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0} +} + +type HealthCheckRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *HealthCheckRequest) Reset() { + *x = HealthCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckRequest) ProtoMessage() {} + +func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthCheckRequest) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +type HealthCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (x *HealthCheckResponse) Reset() { + *x = HealthCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckResponse) ProtoMessage() {} + +func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. 
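// Usage sketch (not part of this patch; the listen address is a placeholder):
// the generated types in this file combine with the service bindings in
// health_grpc.pb.go below to serve and query health status. A production
// server would more likely use the ready-made google.golang.org/grpc/health
// implementation instead of a hand-written one.

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

type healthServer struct {
	// Embedding keeps the implementation forward compatible; Watch falls
	// back to the generated Unimplemented stub.
	healthpb.UnimplementedHealthServer
}

func (healthServer) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	healthpb.RegisterHealthServer(s, healthServer{})
	go func() { _ = s.Serve(lis) }()
	defer s.Stop()

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	log.Printf("status=%v err=%v", resp.GetStatus(), err)
}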
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if x != nil { + return x.Status + } + return HealthCheckResponse_UNKNOWN +} + +var File_grpc_health_v1_health_proto protoreflect.FileDescriptor + +var file_grpc_health_v1_health_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, + 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, + 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, + 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, + 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, + 
0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_health_v1_health_proto_rawDescOnce sync.Once + file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc +) + +func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { + file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + }) + return file_grpc_health_v1_health_proto_rawDescData +} + +var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_goTypes = []interface{}{ + (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus + (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest + (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse +} +var file_grpc_health_v1_health_proto_depIdxs = []int32{ + 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus + 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_grpc_health_v1_health_proto_init() } +func file_grpc_health_v1_health_proto_init() { + if File_grpc_health_v1_health_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_health_v1_health_proto_goTypes, + DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs, + EnumInfos: file_grpc_health_v1_health_proto_enumTypes, + MessageInfos: file_grpc_health_v1_health_proto_msgTypes, + }.Build() + File_grpc_health_v1_health_proto = out.File + file_grpc_health_v1_health_proto_rawDesc = nil + file_grpc_health_v1_health_proto_goTypes = nil + file_grpc_health_v1_health_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go 
b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 00000000..a332dfd7 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,218 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HealthClient interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +// All implementations should embed UnimplementedHealthServer +// for forward compatibility +type HealthServer interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, Health_WatchServer) error +} + +// UnimplementedHealthServer should be embedded to have forward compatible implementations. +type UnimplementedHealthServer struct { +} + +func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + +// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HealthServer will +// result in compilation errors. 
+type UnsafeHealthServer interface { + mustEmbedUnimplementedHealthServer() +} + +func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + s.RegisterService(&Health_ServiceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream +} + +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} + +// Health_ServiceDesc is the grpc.ServiceDesc for Health service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Health_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000..bb96ef57 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. 
req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. 
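+	// For example: "/grpc.health.v1.Health/Watch".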
+ FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 00000000..5fc0ee3d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + grpcbackoff "google.golang.org/grpc/backoff" + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. 
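+	// For example, with the defaults from grpcbackoff.DefaultConfig
+	// (BaseDelay=1s, Multiplier=1.6, Jitter=0.2), retries=3 yields
+	// 1s * 1.6^3 ≈ 4.1s here; the line below then scales that by a
+	// uniform random factor in [0.8, 1.2].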
+ backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 00000000..08666f62 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. 
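+// A wrapper that is neither current nor pending belongs to a policy that has
+// already been switched away from, and its calls into the parent are ignored.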
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. 
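+// (balancer.ExitIdler is an optional interface; the manual re-connect
+// fallback covers LB policies that predate it.)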
+func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. 
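+	// Close the wrapped policy first so it stops producing updates, then
+	// remove any SubConns it still owns from the parent ClientConn.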
+ bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 00000000..3a905d96 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( + "google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { + // Parse parses loads from metadata. + Parse(md metadata.MD) interface{} +} + +var parser Parser + +// SetParser sets the load parser. +// +// Not mutex-protected, should be called before any gRPC functions. +func SetParser(lr Parser) { + parser = lr +} + +// Parse calls parser.Read(). +func Parse(md metadata.MD) interface{} { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 00000000..809d73cc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,189 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implementation binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" +) + +// Logger is the global binary logger. It can be used to get binary logger for +// each method. 
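+// A nil MethodLogger returned by GetMethodLogger disables binary logging for
+// that method.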
+type Logger interface { + GetMethodLogger(methodName string) MethodLogger +} + +// binLogger is the global binary logger for the binary. One of this should be +// built at init time from the configuration (environment variable or flags). +// +// It is used to get a MethodLogger for each individual method. +var binLogger Logger + +var grpclogLogger = grpclog.Component("binarylog") + +// SetLogger sets the binary logger. +// +// Only call this at init time. +func SetLogger(l Logger) { + binLogger = l +} + +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each MethodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.GetMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { + // Max length of header and message. + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} +} + +type logger struct { + config LoggerConfig +} + +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { + return fmt.Errorf("conflicting global rules found") + } + l.config.All = ml + return nil +} + +// Set method logger for "service/*". +// +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) + } + l.config.Services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) + } + l.config.Methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". 
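+//
+// A blacklisted method overrides service ("service/*") and global ("*")
+// rules, but conflicts with an exact per-method rule.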
+func (l *logger) setBlacklist(method string) error { + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) + } + l.config.Blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the MethodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each MethodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func (l *logger) GetMethodLogger(methodName string) MethodLogger { + s, m, err := grpcutil.ParseMethod(methodName) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) + } + if _, ok := l.config.Blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) + } + if l.config.All == nil { + return nil + } + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 00000000..1ee00a39 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. It's + // for testing only. 
+ AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 00000000..f9e80e27 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// NewLoggerFromConfigString reads the string and build a logger. It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclogLogger.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. 
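+	// e.g. "-Foo/Bar" turns logging off for /Foo/Bar even when "Foo/*" or
+	// "*" is also configured.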
+ if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". + headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. 
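+	// e.g. "{h}" -> (maxUInt, 0), "{h:256}" -> (256, 0).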
+ if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 00000000..179f4a26 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,435 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +type MethodLogger interface { + Log(LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this plugable. +} + +// NewTruncatingMethodLogger returns a new truncating method logger. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: DefaultSink, // TODO(blog): make it plugable. 
+ } +} + +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *pb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *pb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *pb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + return m +} + +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) +} + +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.Value)) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { + toProto() *pb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. + clientHeader := &pb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &pb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. 
+ PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &pb.GrpcLogEntry_ServerHeader{ + ServerHeader: &pb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ClientMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ServerMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. +type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. 
+ PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclogLogger.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &pb.GrpcLogEntry_Trailer{ + Trailer: &pb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *pb.Metadata { + ret := &pb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &pb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *pb.Address { + ret := &pb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = pb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = pb.Address_TYPE_IPV6 + } else { + ret.Type = pb.Address_TYPE_UNKNOWN + // Do not set address and port fields. + break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = pb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = pb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 00000000..c2fdd58b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "io" + "sync" + "time" + + "github.com/golang/protobuf/proto" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +) + +var ( + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*pb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. +func newWriterSink(w io.Writer) Sink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufferedSink struct { + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} +} + +func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { + fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. + fs.startFlushGoroutine() + fs.flusherStarted = true + } + if err := fs.out.Write(e); err != nil { + return err + } + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. 
+// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + done: make(chan struct{}), + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 00000000..9f6a0c12 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) + b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. 
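+//
+// For example (process is a placeholder for the caller's own handler):
+//
+//	for v := range b.Get() {
+//		b.Load() // release the next buffered value, if any
+//		process(v)
+//	}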
+func (b *Unbounded) Get() <-chan interface{} { + return b.c +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 00000000..777cbcd7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,789 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 + maxTraceEntry = defaultMaxTraceEntry +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + db.set(newChannelMap()) + idGen.reset() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. +type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. +// +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. 
+func NewChannelzStorageForTesting() (cleanup func() error) {
+	db.set(newChannelMap())
+	idGen.reset()
+
+	return func() error {
+		cm := db.get()
+		if cm == nil {
+			return nil
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
+		for {
+			cm.mu.RLock()
+			topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
+			cm.mu.RUnlock()
+
+			if err := ctx.Err(); err != nil {
+				return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
+			}
+			if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
+				return nil
+			}
+			<-ticker.C
+		}
+	}
+}
+
+// GetTopChannels returns a slice of ChannelMetrics for top channels, along
+// with a boolean indicating whether there are more top channels to be queried
+// for.
+//
+// The arg id specifies that only top channels with an id at or above it will
+// be included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	return db.get().GetTopChannels(id, maxResults)
+}
+
+// GetServers returns a slice of ServerMetrics, along with a boolean indicating
+// whether there are more servers to be queried for.
+//
+// The arg id specifies that only servers with an id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
+	return db.get().GetServers(id, maxResults)
+}
+
+// GetServerSockets returns a slice of SocketMetrics for the normal sockets of
+// the server identified by id, along with a boolean indicating whether there
+// are more sockets to be queried for.
+//
+// The arg startID specifies that only sockets with an id at or above it will
+// be included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	return db.get().GetServerSockets(id, startID, maxResults)
+}
+
+// GetChannel returns the ChannelMetric for the channel (identified by id).
+func GetChannel(id int64) *ChannelMetric {
+	return db.get().GetChannel(id)
+}
+
+// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannelMetric {
+	return db.get().GetSubChannel(id)
+}
+
+// GetSocket returns the SocketInternalMetric for the socket (identified by id).
+func GetSocket(id int64) *SocketMetric {
+	return db.get().GetSocket(id)
+}
+
+// GetServer returns the ServerMetric for the server (identified by id).
+func GetServer(id int64) *ServerMetric {
+	return db.get().GetServer(id)
+}
+
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
+	id := idGen.genID()
+	var parent int64
+	isTopChannel := true
+	if pid != nil {
+		isTopChannel = false
+		parent = pid.Int()
+	}
+
+	if !IsOn() {
+		return newIdentifer(RefChannel, id, pid)
+	}
+
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         parent,
+		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addChannel(id, cn, isTopChannel, parent)
+	return newIdentifer(RefChannel, id, pid)
+}
+
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a SubChannel's parent id cannot be nil")
+	}
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefSubChannel, id, pid), nil
+	}
+
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid.Int(),
+		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addSubChannel(id, sc, pid.Int())
+	return newIdentifer(RefSubChannel, id, pid), nil
+}
+
+// RegisterServer registers the given server s in the channelz database. It
+// returns the unique channelz tracking id assigned to this server.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefServer, id, nil)
+	}
+
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return newIdentifer(RefServer, id, nil)
+}
+
+// RegisterListenSocket registers the given listen socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this listen socket.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a ListenSocket's parent id cannot be nil")
+	}
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefListenSocket, id, pid), nil
+	}
+
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addListenSocket(id, ls, pid.Int())
+	return newIdentifer(RefListenSocket, id, pid), nil
+}
+
+// RegisterNormalSocket registers the given normal socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this normal socket.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
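+//
+// An illustrative call-site sketch (t, parentID, localAddr, and remoteAddr are
+// assumptions, not part of this package):
+//
+//	sktID, err := channelz.RegisterNormalSocket(t, parentID, fmt.Sprintf("%s -> %s", localAddr, remoteAddr))
+//	// ...
+//	channelz.RemoveEntry(sktID) // when the socket is closed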
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a NormalSocket's parent id cannot be nil")
+	}
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefNormalSocket, id, pid), nil
+	}
+
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addNormalSocket(id, ns, pid.Int())
+	return newIdentifer(RefNormalSocket, id, pid), nil
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id
+// from the channelz database.
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+	if !IsOn() {
+		return
+	}
+	db.get().removeEntry(id.Int())
+}
+
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
+type TraceEventDesc struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event for the entity with the specified id, using
+// the provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+	// Log only the trace description associated with the bottom-most entity.
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+	case CtWarning:
+		l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+	case CtError:
+		l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
+	}
+
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	if IsOn() {
+		db.get().traceEvent(id.Int(), desc)
+	}
+}
+
+// channelMap is the storage data structure for channelz.
+// Methods of channelMap can be divided into two categories with respect to locking:
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called while the global lock is held.
+// A method of the second type must always be called from within a method of
+// the first type.
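+// For example, removeEntry below (first category) acquires c.mu and, while
+// holding it, calls findEntry and triggerDelete (second category).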
+type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { + c.mu.Lock() + cn.cm = c + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { + c.mu.Lock() + sc.cm = c + sc.trace.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. +// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. 
+func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() 
+ if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + s := make([]*SocketMetric, 0, len(sks)) + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. 
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 00000000..c9a27acd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. 
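+//
+// For example (illustrative):
+//
+//	id := channelz.NewIdentifierForTesting(channelz.RefChannel, 5, nil)
+//	// id.String() == "Channel #5"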
+func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 00000000..8e13a3d2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 00000000..7b2f350e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,722 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if the
+	// child list is not empty, then deletion from the database is on hold until the
+	// last child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before, and
+	// whether the child list is now empty. If both conditions are met, then delete
+	// self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
+	getParentID() int64
+}
+
+// dummyEntry is a fake entry to handle the entry-not-found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race condition.
+	// For example, there could be a race between ClientConn.Close() info being propagated
+	// to addrConn and http2Client. ClientConn.Close() cancels the context, causing
+	// http2Client to fail. The error is then caught by the transport monitor before
+	// addrConn.tearDown() is called inside ClientConn.Close(). Therefore, the addrConn
+	// will create a new transport. And when registering the new transport in channelz,
+	// its parent addrConn could have already been torn down and deleted from channelz
+	// tracking, and thus reach the code here.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under a race condition.
+	// Refer to the example described in addChild().
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// ChannelMetric defines the info channelz provides for a specific Channel, which
+// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ChannelMetric struct {
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+	// ChannelData contains channel internal metrics reported by the channel through
+	// ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this channel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this channel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this channel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a channel to have sockets
+	// directly; therefore, this field is unused.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// SubChannelMetric defines the info channelz provides for a specific SubChannel,
+// which includes ChannelInternalMetric and channelz-specific data, such as
+// channelz id, child list, etc.
+type SubChannelMetric struct {
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName string
+	// ChannelData contains subchannel internal metrics reported by the subchannel
+	// through ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this subchannel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have nested
+	// channels as children; therefore, this field is unused.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this subchannel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have
+	// subchannels as children; therefore, this field is unused.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this subchannel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// ChannelInternalMetric defines the struct that the implementor of Channel interface
+// should return from ChannelzMetric().
+type ChannelInternalMetric struct {
+	// current connectivity state of the channel.
+	State connectivity.State
+	// The target this channel originally tried to connect to. May be absent.
+	Target string
+	// The number of calls started on the channel.
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+}
+
+// ChannelTrace stores traced events on a channel/subchannel and related info.
+type ChannelTrace struct {
+	// EventNum is the number of events that ever got traced (i.e. including those that have been deleted).
+	EventNum int64
+	// CreationTime is the creation time of the trace.
+	CreationTime time.Time
+	// Events stores the most recent trace events (up to $maxTraceEntry; newer events
+	// overwrite the oldest one).
+	Events []*TraceEvent
+}
+
+// TraceEvent represents a single trace event.
+type TraceEvent struct {
+	// Desc is a simple description of the trace event.
+ Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. +type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. 
+func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. +func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. 
addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. 
+	RemoteAddr net.Addr
+	// Optional, represents the name of the remote endpoint, if different from
+	// the original target name.
+	RemoteName    string
+	SocketOptions *SocketOptionData
+	Security      credentials.ChannelzSecurityValue
+}
+
+// Socket is the interface that should be satisfied in order to be tracked by
+// channelz as Socket.
+type Socket interface {
+	ChannelzMetric() *SocketInternalMetric
+}
+
+type listenSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ls *listenSocket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *listenSocket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *listenSocket) triggerDelete() {
+	ls.cm.deleteEntry(ls.id)
+	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
+}
+
+func (ls *listenSocket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *listenSocket) getParentID() int64 {
+	return ls.pid
+}
+
+type normalSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ns *normalSocket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+}
+
+func (ns *normalSocket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+}
+
+func (ns *normalSocket) triggerDelete() {
+	ns.cm.deleteEntry(ns.id)
+	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
+}
+
+func (ns *normalSocket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
+}
+
+func (ns *normalSocket) getParentID() int64 {
+	return ns.pid
+}
+
+// ServerMetric defines the info channelz provides for a specific Server, which
+// includes ServerInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ServerMetric struct {
+	// ID is the channelz id of this server.
+	ID int64
+	// RefName is the human readable reference string of this server.
+	RefName string
+	// ServerData contains server internal metric reported by the server through
+	// ChannelzMetric().
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
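+//
+// A minimal implementor sketch (fakeServer and its counter are hypothetical):
+//
+//	type fakeServer struct{ callsStarted int64 }
+//
+//	func (s *fakeServer) ChannelzMetric() *channelz.ServerInternalMetric {
+//		return &channelz.ServerInternalMetric{
+//			CallsStarted: atomic.LoadInt64(&s.callsStarted),
+//		}
+//	}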
+type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. 
+ RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 00000000..1b1c4cce --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 00000000..8b06eed1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,43 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package channelz
+
+import (
+	"sync"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	once.Do(func() {
+		logger.Warning("Channelz: socket options are not supported on non-linux environments")
+	})
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
new file mode 100644
index 00000000..8d194e44
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+)
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket interface{}) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 00000000..837ddc40
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,27 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c interface{}) *SocketOptionData {
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 00000000..32c9b590
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 00000000..25ade623 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. 
+ if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 00000000..2919632d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 00000000..f792fd22 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 00000000..7edd196b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,39 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" +) + +var ( + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 00000000..821dd0a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 00000000..af09711a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" + "strings" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" + federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" + + c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. 
+ XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + // XDSClientSideSecurity is used to control processing of security + // configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + // XDSAggregateAndDNS indicates whether processing of aggregated cluster + // and DNS cluster is enabled, which can be disabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "false". + XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") + // XDSFederation indicates whether federation support is enabled. + XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS Cluster Specifier is enabled, which can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 00000000..b68e26a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth.
+func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
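+// +// Illustrative note (an assumption, not upstream text): the external grpclog +// package is expected to populate Logger via its SetLoggerV2 entry point, and +// to also set DepthLogger when the supplied LoggerV2 implements DepthLoggerV2.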
+type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...interface{}) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 00000000..82af70e9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// PrefixLogger does logging with a prefix. +// +// Logging methods on a nil *PrefixLogger log without any prefix. +type PrefixLogger struct { + logger DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 00000000..517ea706 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2018 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + defer mu.Unlock() + return r.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + defer mu.Unlock() + return r.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 00000000..fbe697c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. 
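+// +// Illustrative use of Event (a sketch, not upstream text): +// +//	e := grpcsync.NewEvent() +//	go func() { <-e.Done(); /* teardown work */ }() // runs once Fire is called +//	if e.Fire() { /* only the first Fire call returns true */ }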
+func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 00000000..6635f7bc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 00000000..9f409096 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 00000000..b25b0bae --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 00000000..e2f948e8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 00000000..6f22bd89 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 00000000..ec62b477 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on its own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already.
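+// +// Illustrative examples (not upstream text): +// +//	ContentSubtype("application/grpc")       // "", true +//	ContentSubtype("application/grpc+proto") // "proto", true +//	ContentSubtype("application/json")       // "", false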
+func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 00000000..7a092b2b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 00000000..fd0ee3dc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,157 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. 
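+// +// Illustrative note (an assumption, not upstream text): many hooks below are +// typed interface{} to avoid import cycles; call sites assert the concrete +// function type, e.g.: +// +//	parse := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)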
+package internal + +import ( + "context" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. + BalancerUnregister func(name string) + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfig. + // This function compares the config without rawJSON stripped, in case + // there's a difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool + // GetCertificateProviderBuilder returns the registered builder for the + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. + GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed-in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // DrainServerTransports initiates a graceful close of the existing + // connections that a gRPC server accepted on the provided listener address. + // An xDS-enabled server invokes this method on a grpc.Server when a + // particular listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + ClearGlobalDialOptions func() + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn.
+ WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// its health back by calling setConnectivityState(). +// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 00000000..b2980f8a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-headers are ignored + if k[0] == ':' { + continue + } + // check key; index the string directly to save the conversion a + // for-range over a string would do + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // index the string directly to save the conversion a for-range over a string would do + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 00000000..0177af4b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = "  " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2 LDS, where the v2 + // messages are not imported, and marshaling will fail because the + // message is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2 LDS, where the v2 + // messages are not imported, and marshaling will fail because the + // message is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string.
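+// +// Illustrative example (not upstream text): +// +//	FormatJSON([]byte(`{"a":1}`)) // returns the jsonIndent-indented multi-line form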
+func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 00000000..c7a18a94 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. 
On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side. +type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about the connection the RPC was received on, and its HTTP + // headers. This information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns.
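+// +// Illustrative note (editorial, derived from the code below): SelectConfig +// holds the read lock for the duration of a call, so UpdateConfigSelector's +// write lock cannot be acquired until every in-flight SelectConfig call using +// the previous ConfigSelector has returned.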
+type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 00000000..75301c51 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,458 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +var logger = grpclog.Component("dns") + +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. "::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. 
+	minDNSResRate = 30 * time.Second
+)
+
+var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
+	return func(ctx context.Context, network, address string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, authority)
+	}
+}
+
+var customAuthorityResolver = func(authority string) (netResolver, error) {
+	host, port, err := parseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+
+	authorityWithPort := net.JoinHostPort(host, port)
+
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     customAuthorityDialler(authorityWithPort),
+	}, nil
+}
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{}
+}
+
+type dnsBuilder struct{}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint, defaultPort)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	if target.Authority == "" {
+		d.resolver = defaultResolver
+	} else {
+		d.resolver, err = customAuthorityResolver(target.Authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+type netResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
+
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (deadResolver) Close() {}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	host     string
+	port     string
+	resolver netResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	// wg is used to enforce that Close() returns only after the watcher() goroutine has
+	// finished. Otherwise, a data race would be possible. [Race Example] In dns_resolver_test
+	// we replace the real lookup functions with mocked ones to facilitate testing.
+	// If Close() didn't wait for the watcher() goroutine to finish, the race detector would
+	// sometimes warn that the lookups (reads of the lookup function pointers) inside the
+	// watcher() goroutine race with replaceNetFunc (which writes the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + backoffIndex := 1 + for { + state, err := d.lookup() + if err != nil { + // Report error to the underlying grpc.ClientConn. + d.cc.ReportError(err) + } else { + err = d.cc.UpdateState(*state) + } + + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. + timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +func handleDNSError(err error, lookupType string) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. 
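+		// For reference, a well-formed record might look like the following
+		// (hypothetical zone entry, for illustration only):
+		//
+		//	_grpc_config.example.com. 300 IN TXT "grpc_config=[{\"serviceConfig\":{...}}]"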
+ return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
+ host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 00000000..520d9229 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
+package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 00000000..7f1a702c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. 
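+		// For illustration: a target such as "unix-abstract:server-name" yields
+		// the address "@server-name"; Go's syscall package replaces the leading
+		// '@' with a NUL byte (and drops the trailing NUL), producing the
+		// abstract socket name "\0server-name".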
+ addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 00000000..51e733e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. 
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. 
The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 00000000..b0ead4f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. 
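+//
+// For example (hypothetical): New(codes.NotFound, "user not found").Message()
+// returns "user not found", while a nil *Status returns "".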
+func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{s: s} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + s *Status +} + +func (e *Error) Error() string { + return e.s.String() +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return e.s +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 00000000..b3a72276 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + logger.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + var ( + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 00000000..999f52cd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,77 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionalities that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+var logger = grpclog.Component("core")
+
+func log() {
+	once.Do(func() {
+		logger.Info("CPU time info is unavailable on non-linux environments.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux environments.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function under non-linux environments.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(conn net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 00000000..070680ed
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// our beta * our estimated bdp and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
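+	// Worked example (illustrative numbers only): with a current estimate of
+	// bdp = 64KiB, a sample of 48KiB qualifies, since 48KiB >= 0.66*64KiB ≈ 42KiB;
+	// if the measured bandwidth is also the maximum seen so far, the estimate
+	// becomes gamma*sample = 96KiB, capped at bdpLimit.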
+ beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. 
+ if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 00000000..409769f4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,990 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "strconv" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. 
+ cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. 
+// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Value // chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + select { + case <-ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + c.trfChan.Store(make(chan struct{})) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. 
+	for head := c.list.dequeueAll(); head != nil; head = head.next {
+		hdr, ok := head.it.(*headerFrame)
+		if !ok {
+			continue
+		}
+		if hdr.onOrphaned != nil { // It will be nil on the server-side.
+			hdr.onOrphaned(ErrConnClosing)
+		}
+	}
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
+	ch, _ := c.trfChan.Load().(chan struct{})
+	if ch != nil {
+		close(ch)
+	}
+	c.trfChan.Store((chan struct{})(nil))
+	c.mu.Unlock()
+}
+
+type side int
+
+const (
+	clientSide side = iota
+	serverSide
+)
+
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames
+// they get added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that are not cleaned-up yet.
+	// On the client side, this is all streams whose headers were sent out.
+	// On the server side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally has a list of data items (and perhaps trailers
+	// on the server side) to be sent out.
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:          s,
+		cbuf:          cbuf,
+		sendQuota:     defaultWindowSize,
+		oiws:          defaultWindowSize,
+		estdStreams:   make(map[uint32]*outStream),
+		activeStreams: newOutStreamList(),
+		framer:        fr,
+		hBuf:          &buf,
+		hEnc:          hpack.NewEncoder(&buf),
+		bdpEst:        bdpEst,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+//  1. Updating loopy's internal state, and/or
+//  2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+//  1. Data to send, and
+//  2. Stream-level flow control quota available.
+//
+// In each iteration of the run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
+// This results in writing of HTTP2 frames into an underlying write buffer.
+// When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
+// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter.run returning. %v", err) + } + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. 
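+	// (The headerFrame is queued on a fresh outStream here; the stream is only
+	// recorded in estdStreams after initStream succeeds inside originateStream
+	// below.)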
+ str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + if err := hdr.initStream(str.id); err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. + return nil + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. 
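+	// gRPC clients act on the grpc-status header written below, so the
+	// :status pseudo-header only needs to be a syntactically valid HTTP
+	// status here.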
+	if eas.httpStatus == 0 {
+		eas.httpStatus = 200
+	}
+	headerFields := []hpack.HeaderField{
+		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+	}
+
+	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+		return err
+	}
+	if eas.rst {
+		if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
+	if l.side == clientSide {
+		l.draining = true
+		if len(l.estdStreams) == 0 {
+			return ErrConnClosing
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) goAwayHandler(g *goAway) error {
+	// Handling of outgoing GoAway is very specific to the side.
+	if l.ssGoAwayHandler != nil {
+		draining, err := l.ssGoAwayHandler(g)
+		if err != nil {
+			return err
+		}
+		l.draining = draining
+	}
+	return nil
+}
+
+func (l *loopyWriter) handle(i interface{}) error {
+	switch i := i.(type) {
+	case *incomingWindowUpdate:
+		return l.incomingWindowUpdateHandler(i)
+	case *outgoingWindowUpdate:
+		return l.outgoingWindowUpdateHandler(i)
+	case *incomingSettings:
+		return l.incomingSettingsHandler(i)
+	case *outgoingSettings:
+		return l.outgoingSettingsHandler(i)
+	case *headerFrame:
+		return l.headerHandler(i)
+	case *registerStream:
+		return l.registerStreamHandler(i)
+	case *cleanupStream:
+		return l.cleanupStreamHandler(i)
+	case *earlyAbortStream:
+		return l.earlyAbortStreamHandler(i)
+	case *incomingGoAway:
+		return l.incomingGoAwayHandler(i)
+	case *dataFrame:
+		return l.preprocessData(i)
+	case *ping:
+		return l.pingHandler(i)
+	case *goAway:
+		return l.goAwayHandler(i)
+	case *outFlowControlSizeRequest:
+		return l.outFlowControlSizeRequestHandler(i)
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater, make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
+		}
+	}
+	return nil
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data, and then puts it at the end of activeStreams if there is still more
+// data to be sent and the stream has some stream-level flow control quota left.
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue() // Remove the first stream.
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers: h, which holds the grpc message header, and d,
+	// which holds the actual data.
+	// As an optimization to keep wire traffic low, data from d is coalesced with h to
+	// build DATA frames as close as possible to the maximum HTTP2 frame size.
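+	// Illustrative example (assuming ample connection- and stream-level
+	// quota): with a 5-byte grpc message header in h and 20KB of payload in d,
+	// the first DATA frame carries the 5 header bytes plus http2MaxFrameLen-5
+	// payload bytes, rather than a tiny 5-byte frame followed by the payload.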
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+		// Client sends out empty data frame with endStream = true
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from stream
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+	var (
+		buf []byte
+	)
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if maxSize > strQuota {
+		maxSize = strQuota
+	}
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
+		maxSize = int(l.sendQuota)
+	}
+	// Compute how much of the header and data we can send within quota and max frame length
+	hSize := min(maxSize, len(dataItem.h))
+	dSize := min(maxSize-hSize, len(dataItem.d))
+	if hSize != 0 {
+		if dSize == 0 {
+			buf = dataItem.h
+		} else {
+			// We can add some data to the grpc message header to distribute bytes more equally across frames.
+			// Copy on the stack to avoid generating garbage
+			var localBuf [http2MaxFrameLen]byte
+			copy(localBuf[:hSize], dataItem.h)
+			copy(localBuf[hSize:], dataItem.d[:dSize])
+			buf = localBuf[:hSize+dSize]
+		}
+	} else {
+		buf = dataItem.d
+	}
+
+	size := hSize + dSize
+
+	// Now that outgoing flow controls are checked, we can replenish str's write quota
+	str.wq.replenish(size)
+	var endStream bool
+	// If this is the last data message on this stream and all of it can be written in this iteration.
+	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+		endStream = true
+	}
+	if dataItem.onEachWrite != nil {
+		dataItem.onEachWrite()
+	}
+	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+		return false, err
+	}
+	str.bytesOutStanding += size
+	l.sendQuota -= uint32(size)
+	dataItem.h = dataItem.h[hSize:]
+	dataItem.d = dataItem.d[dSize:]
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+		str.itl.dequeue()
+	}
+	if str.itl.isEmpty() {
+		str.state = empty
+	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+			return false, err
+		}
+		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+			return false, err
+		}
+	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
+		str.state = waitingOnStreamQuota
+	} else { // Otherwise add it back to the list of active streams.
+		l.activeStreams.enqueue(str)
+	}
+	return false, nil
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 00000000..9fa306b2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 00000000..97198c51 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,215 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. 
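+	// Note that get only checks that quota is positive before subtracting,
+	// so a large write may drive quota negative; that is what makes this
+	// limit soft.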
+ replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. + pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) { + f.mu.Lock() + f.limit = n + f.mu.Unlock() +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. 
This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 00000000..fb272235 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,462 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats []stats.Handler +} + +func (ht *serverHandlerTransport) Close() { + ht.closeOnce.Do(ht.closeCloseChanOnce) +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. 
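+// It returns ErrConnClosing if the transport is closed before fn could be
+// handed to runStream, which serializes all writes for this request.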
+func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close() + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). 
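+// Reserved headers are skipped here; those are owned by the transport itself.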
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() + return ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + for _, sh := range ht.stats { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. 
+	// The goroutine that is started is the one that then calls
+	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+	startStream(s)
+
+	ht.runStream()
+	close(requestOver)
+
+	// Wait for reading goroutine to finish.
+	req.Body.Close()
+	<-readerDone
+}
+
+func (ht *serverHandlerTransport) runStream() {
+	for {
+		select {
+		case fn := <-ht.writes:
+			fn()
+		case <-ht.closedCh:
+			return
+		}
+	}
+}
+
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
+
+func (ht *serverHandlerTransport) Drain() {
+	panic("Drain() is not implemented")
+}
+
+// mapRecvMsgError maps the non-nil err into the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, it can only be:
+// - io.EOF
+// - io.ErrUnexpectedEOF
+// - of type transport.ConnectionError
+// - an error from the status package
+func mapRecvMsgError(err error) error {
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return err
+	}
+	if se, ok := err.(http2.StreamError); ok {
+		if code, ok := http2ErrConvTab[se.Code]; ok {
+			return status.Error(code, se.Error())
+		}
+	}
+	if strings.Contains(err.Error(), "body closed by handler") {
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return connectionErrorf(true, err, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 00000000..d518b07e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1758 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+	lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
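+	// (Keeping it first in the struct guarantees the 8-byte alignment that
+	// sync/atomic requires for 64-bit operations on 32-bit platforms.)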
+	ctx        context.Context
+	cancel     context.CancelFunc
+	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
+	userAgent  string
+	md         metadata.MD
+	conn       net.Conn // underlying communication channel
+	loopy      *loopyWriter
+	remoteAddr net.Addr
+	localAddr  net.Addr
+	authInfo   credentials.AuthInfo // auth info about the connection
+
+	readerDone chan struct{} // sync point to enable testing.
+	writerDone chan struct{} // sync point to enable testing.
+	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+	// that the server sent GoAway on this transport.
+	goAway chan struct{}
+
+	framer *framer
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	// Do not access controlBuf with mu held.
+	controlBuf *controlBuffer
+	fc         *trInFlow
+	// The scheme used: https if TLS is on, http otherwise.
+	scheme string
+
+	isSecure bool
+
+	perRPCCreds []credentials.PerRPCCredentials
+
+	kp               keepalive.ClientParameters
+	keepaliveEnabled bool
+
+	statsHandlers []stats.Handler
+
+	initialWindowSize int32
+
+	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+	maxSendHeaderListSize *uint32
+
+	bdpEst *bdpEstimator
+
+	maxConcurrentStreams  uint32
+	streamQuota           int64
+	streamsQuotaAvailable chan struct{}
+	waitingStreams        uint32
+	nextID                uint32
+	registeredCompressors string
+
+	// Do not access controlBuf with mu held.
+	mu            sync.Mutex // guard the following variables
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// prevGoAwayID records the Last-Stream-ID in the previous GOAWAY frame.
+	prevGoAwayID uint32
+	// goAwayReason records the http2.ErrCode and debug data received with the
+	// GoAway frame.
+	goAwayReason GoAwayReason
+	// goAwayDebugMessage contains a detailed human readable string about a
+	// GoAway frame, useful for error messages.
+	goAwayDebugMessage string
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant. The condition for dormancy is based on the number of active
+	// streams and the `PermitWithoutStream` keepalive client parameter. And
+	// since the number of active streams is guarded by the above mutex, we use
+	// the same for this condition variable as well.
+	kpDormancyCond *sync.Cond
+	// A boolean to track whether the keepalive goroutine is dormant or not.
+	// This is checked before attempting to signal the above condition
+	// variable.
+	kpDormant bool
+
+	// Fields below are for channelz metric collection.
+	channelzID *channelz.Identifier
+	czData     *channelzData
+
+	onGoAway func(GoAwayReason)
+	onClose  func()
+
+	bufferPool *bufferPool
+
+	connectionID uint64
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+	address := addr.Addr
+	networkType, ok := networktype.Get(addr)
+	if fn != nil {
+		// Special handling for unix scheme with custom dialer. Back in the day,
+		// we did not have a unix resolver and therefore targets with a unix
+		// scheme would end up using the passthrough resolver. So, users used a
+		// custom dialer in this case and expected the original dial target to
+		// be passed to the custom dialer. Now, we have a unix resolver. But if
+		// a custom dialer is specified, we want to retain the old behavior in
+		// terms of the address being passed to the custom dialer.
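+		// Concretely: an absolute path is re-prefixed below as
+		// "unix://"+address and a relative one as "unix:"+address, matching
+		// the two supported target forms.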
+ if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if connectCtx.Err() != nil { + // connectCtx expired before exiting the function. Hard close the connection. + conn.Close() + } + }(conn) + + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
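+	// Zero values mean "unset" and fall back to the client defaults; keepalive
+	// stays disabled unless the resulting kp.Time is finite.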
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandlers: opts.StatsHandlers, + initialWindowSize: initialWindowSize, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + onClose: onClose, + } + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + sh.HandleConn(t.ctx, connBegin) + } + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
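+	// Note that the stream ID is not assigned here; it is allocated under the
+	// controlBuf's lock in NewStream (see checkForStreamQuota) so that HEADERS
+	// frames go out in increasing stream-ID order.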
+ s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + registeredCompressors := t.registeredCompressors + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. 
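+		// The name is folded into grpc-accept-encoding below so the server
+		// knows this client can decode responses compressed with it.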
+ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. + timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = strings.ToLower(v) + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. 
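+			// (HTTP/2 requires lowercase field names; see RFC 7540, Section 8.1.2.)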
+ k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { + Err error + + AllowTransparentRetry bool +} + +func (e NewStreamError) Error() string { + return e.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. 
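+			// No HEADERS frame has been written for this stream yet, so cleanup
+			// only unwinds local state and fails any pending reads.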
+ err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err + } + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + return checkForHeaderListSize(it) && checkForStreamQuota(it) + }, hdr) + if err != nil { + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, &NewStreamError{Err: hdrListSizeErr} + } + firstTry = false + select { + case <-ch: + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} + case <-t.goAway: + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} + case <-t.ctx.Done(): + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} + } + } + if len(t.statsHandlers) != 0 { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. 
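+// (closeStream enqueues onto controlBuf, which may throttle and block until
+// the reader makes progress; calling it from the reader could deadlock.)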
+func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close(err error) { + t.mu.Lock() + // Make sure we only close once. + if t.state == closing { + t.mu.Unlock() + return + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + t.conn.Close() + channelz.RemoveEntry(t.channelzID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + + // Notify all active streams. 
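+	// Each stream is failed with the Unavailable status derived above; the
+	// per-stream cleanup also updates the channelz stream counters.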
+ for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } + for _, sh := range t.statsHandlers { + connEnd := &stats.ConnEnd{ + Client: true, + } + sh.HandleConn(t.ctx, connEnd) + } +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(ErrConnClosing) + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + d: data, + } + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. 
Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.StreamEnded() { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. 
+				statusCode = codes.DeadlineExceeded
+			}
+		}
+		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+	}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
+	if f.IsAck() {
+		return
+	}
+	var maxStreams *uint32
+	var ss []http2.Setting
+	var updateFuncs []func()
+	f.ForeachSetting(func(s http2.Setting) error {
+		switch s.ID {
+		case http2.SettingMaxConcurrentStreams:
+			maxStreams = new(uint32)
+			*maxStreams = s.Val
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
+		return nil
+	})
+	if isFirst && maxStreams == nil {
+		maxStreams = new(uint32)
+		*maxStreams = math.MaxUint32
+	}
+	sf := &incomingSettings{
+		ss: ss,
+	}
+	if maxStreams != nil {
+		updateStreamQuota := func() {
+			delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
+			t.maxConcurrentStreams = *maxStreams
+			t.streamQuota += delta
+			if delta > 0 && t.waitingStreams > 0 {
+				close(t.streamsQuotaAvailable) // wake all of them up.
+				t.streamsQuotaAvailable = make(chan struct{}, 1)
+			}
+		}
+		updateFuncs = append(updateFuncs, updateStreamQuota)
+	}
+	t.controlBuf.executeAndPut(func(interface{}) bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, sf)
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+	if f.IsAck() {
+		// Maybe it's a BDP ping.
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
+		if logger.V(logLevel) {
+			logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+		}
+	}
+	id := f.LastStreamID
+	if id > 0 && id%2 == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
+		return
+	}
+	// A client can receive multiple GoAways from the server (see
+	// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
+	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+	// sent after an RTT delay with the ID of the last stream the server will
+	// process.
+	//
+	// Therefore, when we get the first GoAway we don't necessarily close any
+	// streams. On a second GoAway we close all streams created after its
+	// GoAway ID. This way streams that were in-flight while the GoAway from
+	// server was being sent don't get killed.
+	select {
+	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
+		if id > t.prevGoAwayID {
+			t.mu.Unlock()
+			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
+			return
+		}
+	default:
+		t.setGoAwayReason(f)
+		close(t.goAway)
+		defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
+		// Notify the clientconn about the GOAWAY before we set the state to
+		// draining, to allow the client to stop attempting to create streams
+		// before disallowing new streams on this connection.
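+		//
+		// For example: a first, graceful GoAway typically carries the maximum
+		// stream ID (2^31-1), so nothing is killed yet; a later GoAway with,
+		// say, ID 7 then kills streams 9, 11, ... while 1..7 run to completion.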
+		t.onGoAway(t.goAwayReason)
+		t.state = draining
+	}
+	// All streams with IDs greater than the GoAwayId
+	// and smaller than the previous GoAway ID should be killed.
+	upperLimit := t.prevGoAwayID
+	if upperLimit == 0 { // This is the first GoAway Frame.
+		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+	}
+
+	t.prevGoAwayID = id
+	if len(t.activeStreams) == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
+		return
+	}
+
+	streamsToClose := make([]*Stream, 0)
+	for streamID, stream := range t.activeStreams {
+		if streamID > id && streamID <= upperLimit {
+			// The stream was unprocessed by the server.
+			atomic.StoreUint32(&stream.unprocessed, 1)
+			streamsToClose = append(streamsToClose, stream)
+		}
+	}
+	t.mu.Unlock()
+	// Called outside t.mu because closeStream can take controlBuf's mu, which
+	// could induce deadlock and is not allowed.
+	for _, stream := range streamsToClose {
+		t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+	}
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+	t.goAwayReason = GoAwayNoReason
+	switch f.ErrCode {
+	case http2.ErrCodeEnhanceYourCalm:
+		if string(f.DebugData()) == "too_many_pings" {
+			t.goAwayReason = GoAwayTooManyPings
+		}
+	}
+	if len(f.DebugData()) == 0 {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+	} else {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+	}
+}
+
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.goAwayReason, t.goAwayDebugMessage
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+	s := t.getStream(frame)
+	if s == nil {
+		return
+	}
+	endStream := frame.StreamEnded()
+	atomic.StoreUint32(&s.bytesReceived, 1)
+	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
+
+	if !initialHeader && !endStream {
+		// As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, a second HEADERS frame must have the EOS bit set.
+		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
+		return
+	}
+
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		se := status.New(codes.Internal, "peer header list size exceeded limit")
+		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
+		return
+	}
+
+	var (
+		// If a gRPC Response-Headers has already been received, then it means
+		// that the peer is speaking gRPC and we are in gRPC mode.
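+		// (initialHeader is derived from headerChanClosed above: once a first
+		// HEADERS frame has been seen, anything that follows is trailers.)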
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. + se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + isHeader := false + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. 
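+			// Mark this block as Response-Headers (rather than Trailers-Only)
+			// so the stats handlers below emit an InHeader, not an InTrailer.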
+ isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + for _, sh := range t.statsHandlers { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + + if !endStream { + return + } + + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) +} + +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { + frame, err := t.framer.fr.ReadFrame() + if err != nil { + return connectionErrorf(true, err, "error reading server preface: %v", err) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) + } + t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
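+				// Any error other than a stream-level one is fatal to the
+				// whole connection: close the transport, which fails all
+				// active streams.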
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } + } +} + +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). 
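+			// If the ping goes unacked, timeoutLeft shrinks by each slept
+			// interval until the check above closes the connection.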
+ sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 00000000..3dd15647 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1445 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats []stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when Drain() is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. 
+ // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID *channelz.Identifier + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen +} + +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. 
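+	// SETTINGS_INITIAL_WINDOW_SIZE only affects stream-level windows; a larger
+	// connection-level window must be announced with an explicit WINDOW_UPDATE
+	// on stream 0.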
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + + done := make(chan struct{}) + t := &http2Server{ + ctx: setConnection(context.Background(), rawConn), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandlers, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + // Add peer information to the http2server context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + sh.HandleConn(t.ctx, connBegin) + } + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
+ if err == io.EOF { + return nil, io.EOF + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + t.conn.Close() + t.controlBuf.finish() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return false + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + return true + } + t.maxStreamID = streamID + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + // "Transports must consider requests containing the Connection header + // as malformed." 
- A41 + case "connection": + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + } + headerError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. + if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 400, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return false + } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + + // Attach the received metadata to the context. 
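+	// grpc-tags-bin and grpc-trace-bin are additionally surfaced through the
+	// stats package so stats handlers can observe them.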
+ if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + if httpMethod != http.MethodPost { + t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + if logger.V(logLevel) { + logger.Infof("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + s.cancel() + return false + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + rst: !frame.StreamEnded(), + }) + return false + } + } + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
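+// It does not return until the underlying connection is broken or the
+// transport is closed.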
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. 
Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.StreamEnded() { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. 
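+	// resetPingStrikes is set on every outgoing data/header frame, so a ping
+	// that follows legitimate write activity is never counted as a strike.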
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } + return false + } + } + return true +} + +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + +// WriteHeader sends the header metadata md back to the client. +func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { + return ErrIllegalHeaderWrite + } + + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + if err := t.writeHeaderLocked(s); err != nil { + return status.Convert(err).Err() + } + return nil +} + +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. 
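+	// The :status pseudo-header must precede all regular header fields in the
+	// HEADERS block, so it is appended first.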
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) + if s.sendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) + success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + onWrite: t.setResetPingStrikes, + }) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + for _, sh := range t.stats { + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + Compression: s.sendCompress, + } + sh.HandleRPC(s.Context(), outHeader) + } + return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. +// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + + if s.getState() == streamDone { + return nil + } + + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + if err := t.writeHeaderLocked(s); err != nil { + return err + } + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) + } + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + } + + // Attach the trailer metadata. + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + onWrite: t.setResetPingStrikes, + } + + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + // Send a RST_STREAM after the trailers if the client has not already half-closed. 
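The grpc-status-details-bin trailer assembled above is how rich error details reach the client. A plausible handler-side counterpart, assuming the errdetails types from google.golang.org/genproto and placeholder request/response types:

package server

import (
	"context"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type req struct{ Name string } // placeholder request type
type resp struct{}             // placeholder response type

// handle returns a status whose details end up marshaled into the
// grpc-status-details-bin trailer written above.
func handle(ctx context.Context, r *req) (*resp, error) {
	if r.Name == "" {
		st := status.New(codes.InvalidArgument, "name is required")
		if detailed, err := st.WithDetails(&errdetails.BadRequest{
			FieldViolations: []*errdetails.BadRequest_FieldViolation{
				{Field: "name", Description: "must be non-empty"},
			},
		}); err == nil {
			st = detailed
		}
		return nil, st.Err()
	}
	return &resp{}, nil
}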
+ rst := s.getState() == streamActive + t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + for _, sh := range t.stats { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + return err + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + return t.streamContextErr(s) + } + } + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: t.setResetPingStrikes, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return t.streamContextErr(s) + } + return t.controlBuf.put(df) +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) + defer func() { + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() + }() + + for { + select { + case <-idleTimer.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.Drain() + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.Drain() + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } + t.Close() + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. 
Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } + t.Close() + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) + } + channelz.RemoveEntry(t.channelzID) + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + for _, sh := range t.stats { + connEnd := &stats.ConnEnd{} + sh.HandleConn(t.ctx, connEnd) + } +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. + return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
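Stepping back to the keepalive loop above: its timer discipline, recording the last-read time atomically and re-arming the timer for the remainder rather than resetting it on every read, distills to a reusable pattern. A self-contained sketch with illustrative names:

package watchdog

import (
	"sync/atomic"
	"time"
)

// watchdog fires onIdle when no touch has happened for interval,
// mirroring the kpTimer arithmetic in the keepalive loop above.
type watchdog struct {
	lastRead int64 // unix nanos, stored by touch
	interval time.Duration
	onIdle   func()
}

func (w *watchdog) touch() { atomic.StoreInt64(&w.lastRead, time.Now().UnixNano()) }

func (w *watchdog) run(done <-chan struct{}) {
	timer := time.NewTimer(w.interval)
	defer timer.Stop()
	prev := time.Now().UnixNano()
	for {
		select {
		case <-timer.C:
			last := atomic.LoadInt64(&w.lastRead)
			if last > prev {
				// Activity since arming: fire interval after the last read.
				timer.Reset(time.Duration(last) + w.interval - time.Duration(time.Now().UnixNano()))
				prev = last
				continue
			}
			w.onIdle()
			timer.Reset(w.interval)
		case <-done:
			return
		}
	}
}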
+ s.cancel() + + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + t.maxStreamMu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + sid := t.maxStreamID + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
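At the application level, this two-step GOAWAY handshake is what (*grpc.Server).GracefulStop drives. A common shutdown pattern, sketched with an assumed srv and an illustrative timeout:

package main

import (
	"time"

	"google.golang.org/grpc"
)

// shutdown drains gracefully, with a forced stop as a backstop.
func shutdown(srv *grpc.Server) {
	done := make(chan struct{})
	go func() {
		srv.GracefulStop() // GOAWAY + ping, then wait for in-flight RPCs
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(30 * time.Second):
		srv.Stop() // hard-close whatever remains
	}
}

func main() {}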
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 00000000..2c601a86 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,412 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // https://httpwg.org/specs/rfc7540.html#SettingValues + http2InitHeaderTableSize = 4096 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } + logger = grpclog.Component("transport") +) + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. 
+ "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err + } + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err + } + return status.FromProto(st), nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. 
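Concretely, the round trip might be checked as below; a sketch of an in-package test, with the expected string worked out by hand ('é' is 0xC3 0xA9 in UTF-8):

package transport

import "testing"

func TestGrpcMessageRoundTrip(t *testing.T) {
	const in = "résumé: 100%"
	const want = "r%C3%A9sum%C3%A9: 100%25"
	if got := encodeGrpcMessage(in); got != want {
		t.Fatalf("encodeGrpcMessage(%q) = %q, want %q", in, got, want)
	}
	if got := decodeGrpcMessage(want); got != in {
		t.Fatalf("decodeGrpcMessage(%q) = %q, want %q", want, got, in)
	}
}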
+func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + fmt.Fprintf(&sb, "%%%02X", b) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + sb.WriteByte(b) + } else { + fmt.Fprintf(&sb, "%%%02X", b) + } + } + msg = msg[size:] + } + return sb.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + sb.WriteByte(c) + } else { + sb.WriteByte(byte(parsed)) + i += 2 + } + } else { + sb.WriteByte(c) + } + } + return sb.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +// parseDialTarget returns the network and address to pass to dialer. 
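The mapping it performs can be pinned down with a table-driven sketch (in-package test; expected values read off the implementation below):

package transport

import "testing"

func TestParseDialTargetSketch(t *testing.T) {
	for _, c := range []struct{ in, wantNet, wantAddr string }{
		{"example.com:443", "tcp", "example.com:443"},
		{"unix:sock", "unix", "sock"},
		{"unix:/run/app.sock", "unix", "/run/app.sock"},
		{"unix:///run/app.sock", "unix", "/run/app.sock"},
	} {
		if n, a := parseDialTarget(c.in); n != c.wantNet || a != c.wantAddr {
			t.Fatalf("parseDialTarget(%q) = (%q, %q), want (%q, %q)", c.in, n, a, c.wantNet, c.wantAddr)
		}
	}
}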
+func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 00000000..c11b5278 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go new file mode 100644 index 00000000..41596198 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -0,0 +1,142 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // The following variable will be overwritten in the tests. 
+ httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } + + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { + return + } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 00000000..2e615ee2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,823 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + +const logLevel = 2 + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. + return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. 
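The recvBuffer above is a pattern worth distilling: a one-slot channel plus a mutex-guarded backlog yields an unbounded queue that never blocks the producer. A self-contained sketch with illustrative names:

package sketch

import "sync"

type unbounded struct {
	mu      sync.Mutex
	c       chan int
	backlog []int
}

func newUnbounded() *unbounded { return &unbounded{c: make(chan int, 1)} }

func (u *unbounded) put(v int) {
	u.mu.Lock()
	defer u.mu.Unlock()
	if len(u.backlog) == 0 {
		select {
		case u.c <- v: // fast path: hand straight to the consumer
			return
		default:
		}
	}
	u.backlog = append(u.backlog, v)
}

// load is called by the consumer after each receive, mirroring
// recvBuffer.load: it promotes one backlogged item into the channel.
func (u *unbounded) load() {
	u.mu.Lock()
	defer u.mu.Unlock()
	if len(u.backlog) > 0 {
		select {
		case u.c <- u.backlog[0]:
			u.backlog = u.backlog[1:]
		default:
		}
	}
}

func (u *unbounded) get() <-chan int { return u.c }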
+type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last *bytes.Buffer // Stores the remaining data in the previous calls. + err error + freeBuffer func(*bytes.Buffer) +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + // Read remaining data left in last call. + copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) + r.last = nil + } + return copied, nil + } + if r.closeStream != nil { + n, r.err = r.readClient(p) + } else { + n, r.err = r.read(p) + } + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + select { + case <-r.ctxDone: + return 0, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readClient(p []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, p) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer + } + return copied, nil +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ct *http2Client // nil for server side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. + ctxDone <-chan struct{} // same as done chan but for server side. 
Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + wq *writeQuota + + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return + } + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. 
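Applications normally reach SetSendCompress indirectly, by selecting a registered compressor per call; the chosen name is what the transport advertises in the grpc-encoding header. A sketch using the stock gzip compressor:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // importing registers the compressor
)

// callOptions enables per-call gzip compression for any RPC invocation.
func callOptions() []grpc.CallOption {
	return []grpc.CallOption{grpc.UseCompressor(gzip.Name)}
}

func main() {}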
+func (s *Stream) SetSendCompress(str string) {
+	s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	s.waitOnHeader()
+
+	if !s.headerValid {
+		return nil, s.status.Err()
+	}
+
+	if s.noHeaders {
+		return nil, ErrNoHeaders
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after either
+// Read or Write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
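From a handler, the SetHeader and SetTrailer methods below are reached through the public grpc package. A sketch with placeholder request/response types and illustrative metadata keys:

package server

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// handle sets header and trailer metadata through the public API; they
// feed Stream.SetHeader and Stream.SetTrailer respectively.
func handle(ctx context.Context, in interface{}) (interface{}, error) {
	// Buffered until the first message or WriteStatus flushes it.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-region", "eu-west-1")); err != nil {
		return nil, err
	}
	// Sent with the final status, as trailer metadata.
	if err := grpc.SetTrailer(ctx, metadata.Pairs("x-cache", "miss")); err != nil {
		return nil, err
	}
	return in, nil
}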
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier.
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+	if err != nil {
+		t.er = err
+		return
+	}
+	t.windowHandler(n)
+	return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
+	InTapHandle           tap.ServerInHandle
+	StatsHandlers         []stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	ChannelzParentID      *channelz.Identifier
+	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. Only one of
+	// TransportCredentials and CredsBundle is non-nil.
+	CredsBundle credentials.Bundle
+	// KeepaliveParams stores the keepalive parameters.
+ KeepaliveParams keepalive.ClientParameters + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID *channelz.Identifier + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close(err error) + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. 
Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. 
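Because Unwrap, defined just below, exposes the original error, wrapped causes remain matchable with the standard errors helpers. A self-contained sketch, with connErr standing in for ConnectionError:

package main

import (
	"errors"
	"fmt"
	"net"
)

type connErr struct {
	desc string
	err  error
}

func (e connErr) Error() string { return fmt.Sprintf("connection error: desc = %q", e.desc) }
func (e connErr) Unwrap() error { return e.err }

func main() {
	base := &net.OpError{Op: "read", Net: "tcp"}
	err := connErr{desc: "connection reset", err: base}
	var op *net.OpError
	fmt.Println(errors.As(err, &op)) // true: Unwrap exposes the cause
}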
+ if e.err == nil { + return e + } + return e.err +} + +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indiacte application + // layer of an error. + errStreamDone = errors.New("the stream is done") + // StatusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs. + statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") +) + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 +) + +// channelzData is used to store channelz related data for http2Client and http2Server. +// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. + lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 00000000..e8b49277 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 00000000..34d31b5e --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. 
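+
+ // Illustrative sketch (example values only, not upstream documentation):
+ // these parameters are typically passed through the grpc.WithKeepaliveParams
+ // dial option, e.g.
+ //
+ // grpc.Dial(addr, grpc.WithKeepaliveParams(keepalive.ClientParameters{
+ // Time: 30 * time.Second,
+ // Timeout: 10 * time.Second,
+ // PermitWithoutStream: true,
+ // }))
+ //
+ // with addr and error handling supplied by the caller.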
+}
+
+// ServerParameters is used to set keepalive and max-age parameters on the
+// server-side.
+type ServerParameters struct {
+ // MaxConnectionIdle is a duration for the amount of time after which an
+ // idle connection would be closed by sending a GoAway. Idleness duration is
+ // defined since the most recent time the number of outstanding RPCs became
+ // zero or the connection establishment.
+ MaxConnectionIdle time.Duration // The current default value is infinity.
+ // MaxConnectionAge is a duration for the maximum amount of time a
+ // connection may exist before it will be closed by sending a GoAway. A
+ // random jitter of +/-10% will be added to MaxConnectionAge to spread out
+ // connection storms.
+ MaxConnectionAge time.Duration // The current default value is infinity.
+ // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+ // which the connection will be forcibly closed.
+ MaxConnectionAgeGrace time.Duration // The current default value is infinity.
+ // After a duration of this time if the server doesn't see any activity it
+ // pings the client to see if the transport is still alive.
+ // If set below 1s, a minimum value of 1s will be used instead.
+ Time time.Duration // The current default value is 2 hours.
+ // After having pinged for keepalive check, the server waits for a duration
+ // of Timeout and if no activity is seen even after that the connection is
+ // closed.
+ Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. Server will close connection with a client that violates this
+// policy.
+type EnforcementPolicy struct {
+ // MinTime is the minimum amount of time a client should wait before sending
+ // a keepalive ping.
+ MinTime time.Duration // The current default value is 5 minutes.
+ // If true, server allows keepalive pings even when there are no active
+ // streams (RPCs). If false, and client sends ping when there are no active
+ // streams, server will send GOAWAY and close the connection.
+ PermitWithoutStream bool // false by default.
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 00000000..fb4a88f5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,288 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := make(MD, len(m)) + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := make(MD, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. 
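+//
+// A minimal usage sketch (illustrative values only):
+//
+// md := metadata.Pairs("authorization", "Bearer some-token")
+// ctx := metadata.NewOutgoingContext(context.Background(), md)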
+func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. 
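+//
+// For example (illustrative), a client interceptor can inspect what will be
+// sent with:
+//
+// md, ok := metadata.FromOutgoingContext(ctx)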
+func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } + } + return out, ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 00000000..e01d219f --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 00000000..a5d5516e --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,188 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "io"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/internal/channelz"
+ istatus "google.golang.org/grpc/internal/status"
+ "google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/status"
+)
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+ mu sync.Mutex
+ done bool
+ blockingCh chan struct{}
+ picker balancer.Picker
+}
+
+func newPickerWrapper() *pickerWrapper {
+ return &pickerWrapper{blockingCh: make(chan struct{})}
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+ pw.mu.Lock()
+ if pw.done {
+ pw.mu.Unlock()
+ return
+ }
+ pw.picker = p
+ // pw.blockingCh should never be nil.
+ close(pw.blockingCh)
+ pw.blockingCh = make(chan struct{})
+ pw.mu.Unlock()
+}
+
+func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
+ acw.mu.Lock()
+ ac := acw.ac
+ acw.mu.Unlock()
+ ac.incrCallsStarted()
+ return func(b balancer.DoneInfo) {
+ if b.Err != nil && b.Err != io.EOF {
+ ac.incrCallsFailed()
+ } else {
+ ac.incrCallsSucceeded()
+ }
+ if done != nil {
+ done(b)
+ }
+ }
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+ var ch chan struct{}
+
+ var lastPickErr error
+ for {
+ pw.mu.Lock()
+ if pw.done {
+ pw.mu.Unlock()
+ return nil, nil, ErrClientConnClosing
+ }
+
+ if pw.picker == nil {
+ ch = pw.blockingCh
+ }
+ if ch == pw.blockingCh {
+ // This could happen when either:
+ // - pw.picker is nil (the previous if condition), or
+ // - the caller has already called pick on the current picker.
+ pw.mu.Unlock()
+ select {
+ case <-ctx.Done():
+ var errStr string
+ if lastPickErr != nil {
+ errStr = "latest balancer error: " + lastPickErr.Error()
+ } else {
+ errStr = ctx.Err().Error()
+ }
+ switch ctx.Err() {
+ case context.DeadlineExceeded:
+ return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
+ case context.Canceled:
+ return nil, nil, status.Error(codes.Canceled, errStr)
+ }
+ case <-ch:
+ }
+ continue
+ }
+
+ ch = pw.blockingCh
+ p := pw.picker
+ pw.mu.Unlock()
+
+ pickResult, err := p.Pick(info)
+
+ if err != nil {
+ if err == balancer.ErrNoSubConnAvailable {
+ continue
+ }
+ if st, ok := status.FromError(err); ok {
+ // Status error: end the RPC unconditionally with this status.
+ // First restrict the code to the list allowed by gRFC A54.
+ if istatus.IsRestrictedControlPlaneCode(st) {
+ err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
+ }
+ return nil, nil, dropError{error: err}
+ }
+ // For all other errors, wait for ready RPCs should block and other
+ // RPCs should fail with unavailable.
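+ // (That is, wait-for-ready RPCs keep blocking until the next picker
+ // update, while fail-fast RPCs return Unavailable immediately.)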
+ if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) + } + + acw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) + continue + } + if t := acw.getAddrConn().getReadyTransport(); t != nil { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, pickResult.Done), nil + } + return t, pickResult.Done, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 00000000..fb7a99e0 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,183 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. 
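+ // Keep the current state and picker; no update is sent to the
+ // ClientConn in this case.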
+ return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. + b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() + return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + } + if b.subConn != subConn { + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + } + return + } + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil + return + } + + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) + case connectivity.Connecting: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, + }) + } +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. 
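+// The first Pick on an idle SubConn therefore triggers Connect, and the RPC
+// blocks until the next picker update (signalled via ErrNoSubConnAvailable).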
+type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 00000000..cd455478 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 00000000..04b6371a --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. 
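+// (In this snippet, pb is your generated service package and lis is a
+// net.Listener created earlier, e.g. via net.Listen("tcp", ":50051");
+// error handling is omitted for brevity.)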
+reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 00000000..c22f9a52 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,955 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto + +package grpc_reflection_v1alpha + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). 
+ FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. 
+func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the + // message_request in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. 
+ ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_reflection_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x22, 0xf8, + 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, + 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, + 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, + 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, + 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, + 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 
0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x5b, 0x0a, + 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x18, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, + 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, + 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x93, 0x01, 0x0a, + 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 
0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc +) + +func file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData) + }) + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData +} + +var file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse +} +var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest + 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest + 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse + 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse + 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse + 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse + 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest + 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse + 8, // [8:9] is the sub-list 
for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_reflection_grpc_reflection_v1alpha_reflection_proto_init() } +func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { + if File_reflection_grpc_reflection_v1alpha_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + 
(*ServerReflectionResponse_ErrorResponse)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes,
+		DependencyIndexes: file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs,
+		MessageInfos:      file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes,
+	}.Build()
+	File_reflection_grpc_reflection_v1alpha_reflection_proto = out.File
+	file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil
+	file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = nil
+	file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
new file mode 100644
index 00000000..ee2b82c0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
@@ -0,0 +1,138 @@
+// Copyright 2016 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Service exported by server reflection
+
+syntax = "proto3";
+
+option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha";
+
+package grpc.reflection.v1alpha;
+
+service ServerReflection {
+  // The reflection service is structured as a bidirectional stream, ensuring
+  // all related requests go to a single server.
+  rpc ServerReflectionInfo(stream ServerReflectionRequest)
+      returns (stream ServerReflectionResponse);
+}
+
+// The message sent by the client when calling ServerReflectionInfo method.
+message ServerReflectionRequest {
+  string host = 1;
+  // To use reflection service, the client should set one of the following
+  // fields in message_request. The server distinguishes requests by their
+  // defined field and then handles them using corresponding methods.
+  oneof message_request {
+    // Find a proto file by the file name.
+    string file_by_filename = 3;
+
+    // Find the proto file that declares the given fully-qualified symbol name.
+    // This field should be a fully-qualified symbol name
+    // (e.g. <package>.<service>[.<method>] or <package>.<type>).
+    string file_containing_symbol = 4;
+
+    // Find the proto file which defines an extension extending the given
+    // message type with the given field number.
+    ExtensionRequest file_containing_extension = 5;
+
+    // Finds the tag numbers used by all known extensions of extendee_type, and
+    // appends them to ExtensionNumberResponse in an undefined order.
+    // Its corresponding method is best-effort: it's not guaranteed that the
+    // reflection service will implement this method, and it's not guaranteed
+    // that this method will provide all extensions. Returns
+    // StatusCode::UNIMPLEMENTED if it's not implemented.
+    // This field should be a fully-qualified type name. The format is
+    // <package>.<type>
+    string all_extension_numbers_of_type = 6;
+
+    // List the full names of registered services. The content will not be
+    // checked.
+    string list_services = 7;
+  }
+}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+message ExtensionRequest {
+  // Fully-qualified type name. The format should be <package>.<type>
+  string containing_type = 1;
+  int32 extension_number = 2;
+}
+
+// The message sent by the server to answer ServerReflectionInfo method.
+message ServerReflectionResponse {
+  string valid_host = 1;
+  ServerReflectionRequest original_request = 2;
+  // The server sets one of the following fields according to the
+  // message_request in the request.
+  oneof message_response {
+    // This message is used to answer file_by_filename, file_containing_symbol,
+    // file_containing_extension requests with transitive dependencies.
+    // As the repeated label is not allowed in oneof fields, we use a
+    // FileDescriptorResponse message to encapsulate the repeated fields.
+    // The reflection service is allowed to avoid sending FileDescriptorProtos
+    // that were previously sent in response to earlier requests in the stream.
+    FileDescriptorResponse file_descriptor_response = 4;
+
+    // This message is used to answer all_extension_numbers_of_type requests.
+    ExtensionNumberResponse all_extension_numbers_response = 5;
+
+    // This message is used to answer list_services requests.
+    ListServiceResponse list_services_response = 6;
+
+    // This message is used when an error occurs.
+    ErrorResponse error_response = 7;
+  }
+}
+
+// Serialized FileDescriptorProto messages sent by the server answering
+// a file_by_filename, file_containing_symbol, or file_containing_extension
+// request.
+message FileDescriptorResponse {
+  // Serialized FileDescriptorProto messages. We avoid taking a dependency on
+  // descriptor.proto, which uses proto2 only features, by making them opaque
+  // bytes instead.
+  repeated bytes file_descriptor_proto = 1;
+}
+
+// A list of extension numbers sent by the server answering
+// all_extension_numbers_of_type request.
+message ExtensionNumberResponse {
+  // Full name of the base type, including the package name. The format
+  // is <package>.<type>
+  string base_type_name = 1;
+  repeated int32 extension_number = 2;
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+message ListServiceResponse {
+  // The information of each service may be expanded in the future, so we use
+  // ServiceResponse message to encapsulate it.
+  repeated ServiceResponse service = 1;
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+message ServiceResponse {
+  // Full name of a registered service, including its package name. The format
+  // is <package>.<service>
+  string name = 1;
+}
+
+// The error code and error message sent by the server when an error occurs.
+message ErrorResponse {
+  // This field uses the error codes defined in grpc::StatusCode.
+  int32 error_code = 1;
+  string error_message = 2;
+}
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go
new file mode 100644
index 00000000..b8e76a87
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go
@@ -0,0 +1,155 @@
+// Copyright 2016 gRPC authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. 
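+// For example (editor's illustrative sketch; myServer is a hypothetical
+// implementation):
+//
+//	type myServer struct {
+//		UnimplementedServerReflectionServer
+//	}
+//
+// Any method not overridden on myServer then fails with codes.Unimplemented
+// instead of breaking compilation when new methods are added to the
+// ServerReflection service.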
+type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 00000000..0b41783a --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,324 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. 
+ +To register server reflection on a gRPC server: + + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. + reflection.Register(s) + + s.Serve(lis) +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "io" + "sort" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider +} + +var _ GRPCServer = (*grpc.Server)(nil) + +// Register registers the server reflection service on the given gRPC server. +func Register(s GRPCServer) { + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) +} + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerOptions represents the options used to construct a reflection server. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver +} + +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. 
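+//
+// For example (editor's illustrative sketch; staticServices and grpcServer
+// are hypothetical names), to advertise a fixed set of service names instead
+// of those registered on a *grpc.Server:
+//
+//	type staticServices map[string]grpc.ServiceInfo
+//
+//	func (s staticServices) GetServiceInfo() map[string]grpc.ServiceInfo { return s }
+//
+//	svr := NewServer(ServerOptions{Services: staticServices{"my.pkg.MyService": {}}})
+//	rpb.RegisterServerReflectionServer(grpcServer, svr)
+//
+// Zero-value grpc.ServiceInfo entries are sufficient here, since the
+// reflection service only reads the map keys.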
+// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, + } +} + +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver +} + +// fileDescWithDependencies returns a slice of serialized fileDescriptors in +// wire format ([]byte). The fileDescriptors will include fd and all the +// transitive dependencies of fd with names not in sentFileDescriptors. +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} + for len(queue) > 0 { + currentfd := queue[0] + queue = queue[1:] + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) + if err != nil { + return nil, err + } + r = append(r, currentfdEncoded) + } + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) + } + } + return r, nil +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the +// given symbol, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. The given symbol +// can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing +// given extension, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. 
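+// The result is sorted in ascending order, and an unknown type name yields
+// an error from the descriptor resolver rather than an empty list.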
+func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } + } + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp +} + +// ServerReflectionInfo is the reflection service handler. +func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { + sentFileDescriptors := make(map[string]bool) + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &rpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *rpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *rpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *rpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ 
+ ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *rpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ + Service: s.listServices(), + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 00000000..99db79fa --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) 
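+
+# The steps below fetch the proto sources the generators consume: grpc-proto
+# for the canonical gRPC service definitions, protobuf for its bundled .proto
+# files, and code.proto from googleapis as a standalone dependency.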
+ +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + reflection/grpc_reflection_v1alpha/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. 
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 + +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go + +# grpc/testing does not have a go_package option. +mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 00000000..efcb7f3e --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. 
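+// Two addresses that differ only in their Attributes are distinct entries:
+// after Set(a1, 1) and Set(a2, 2), where a1 and a2 share Addr and ServerName
+// but carry different Attributes, Get(a1) returns 1 and Get(a2) returns 2
+// (editor's illustration).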
+func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 00000000..967cbc73 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,292 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "context" + "net" + "net/url" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. 
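+//
+// A minimal sketch of a builder registration (editor's illustration;
+// exampleBuilder and nopResolver are hypothetical):
+//
+//	type exampleBuilder struct{}
+//
+//	func (exampleBuilder) Scheme() string { return "example" }
+//
+//	func (exampleBuilder) Build(t Target, cc ClientConn, _ BuildOptions) (Resolver, error) {
+//		// Report a fixed backend for every "example:///..." target.
+//		err := cc.UpdateState(State{Addresses: []Address{{Addr: "127.0.0.1:50051"}}})
+//		return nopResolver{}, err
+//	}
+//
+//	func init() { Register(exampleBuilder{}) }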
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The default
+// default scheme is "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+	//
+	// Deprecated: use Attributes in Address instead.
+	Backend AddressType = iota
+	// GRPCLB indicates the address is for a grpclb load balancer.
+	//
+	// Deprecated: to select the GRPCLB load balancing policy, use a service
+	// config with a corresponding loadBalancingConfig. To supply balancer
+	// addresses to the GRPCLB load balancing policy, set State.Attributes
+	// using balancer/grpclb/state.Set.
+	GRPCLB
+)
+
+// Address represents a server the client connects to.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+
+	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
+	//
+	// If Type is GRPCLB, ServerName should be the name of the remote load
+	// balancer, not the name of the backend.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
+	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the SubConn.
+	Attributes *attributes.Attributes
+
+	// BalancerAttributes contains arbitrary data about this address intended
+	// for consumption by the LB policy. These attributes do not affect SubConn
+	// creation, connection establishment, handshaking, etc.
+	BalancerAttributes *attributes.Attributes
+
+	// Type is the type of this address.
+	//
+	// Deprecated: use Attributes instead.
+	Type AddressType
+
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decisions.
+	//
+	// Deprecated: use Attributes instead.
+	Metadata interface{}
+}
+
+// Equal returns whether a and o are identical. Metadata is compared directly,
+// not with any recursive introspection.
+func (a Address) Equal(o Address) bool {
+	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+		a.Attributes.Equal(o.Attributes) &&
+		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+		a.Type == o.Type && a.Metadata == o.Metadata
+}
+
+// String returns a JSON-formatted string representation of the address.
+func (a Address) String() string { + return pretty.ToJSON(a) +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. 
+ ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. +// +// Examples: +// +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +type Target struct { + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. + Authority string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. + Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 00000000..05a9d4e0 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return nil + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. 
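+//
+// Resolver implementations would normally prefer UpdateState; a minimal
+// sketch of a resolver pushing one static address (the exampleResolver type
+// and the address are assumptions for illustration):
+//
+//	func (r *exampleResolver) start() {
+//		r.cc.UpdateState(resolver.State{
+//			Addresses: []resolver.Address{{Addr: "10.0.0.1:50051"}},
+//		})
+//	}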
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 00000000..934fc1aa --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,915 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompresses it. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC.
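+//
+// Each of the exported CallOption helpers defined below writes into this
+// struct; a caller can stack several on one invocation, e.g. (the method
+// name and message values are assumptions for the sketch):
+//
+//	var hdr, trl metadata.MD
+//	err := cc.Invoke(ctx, "/echo.Echo/UnaryEcho", req, resp,
+//		grpc.Header(&hdr), grpc.Trailer(&trl), grpc.WaitForReady(true))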
+type callInfo struct { + compressorType string + failFast bool + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo, *csAttempt) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} + +// Header returns a CallOption that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() +} + +// Trailer returns a CallOption that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail +// immediately.
Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
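+//
+// Any type satisfying encoding.Codec can be forced this way; a passthrough
+// codec for pre-marshaled bytes is one illustrative use (the rawCodec type
+// is an assumption for the sketch, not part of this package):
+//
+//	type rawCodec struct{}
+//
+//	func (rawCodec) Marshal(v interface{}) ([]byte, error) { return v.([]byte), nil }
+//	func (rawCodec) Unmarshal(data []byte, v interface{}) error {
+//		*(v.(*[]byte)) = data
+//		return nil
+//	}
+//	func (rawCodec) Name() string { return "raw" }
+//
+// used per call as the CallOption grpc.ForceCodec(rawCodec{}).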
+type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. 
%d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. 
+ uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) + } else { + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + } + return d, nil +} + +// decompress uses compressor to decompress d, returning the decompressed data +// and its size. If the decompressed data would exceed maxReceiveMessageSize, +// it may return only the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// At most one of the two compressor parameters (dc and compressor) should be +// set; if both are, dc takes precedence. +// TODO(dfawley): wrap the old compressor/decompressor using the new API?
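+//
+// The "new API" referenced above is the encoding registry; a compressor
+// registered there is selected by name per RPC (the snappyCompressor type is
+// an assumption; only the registration pattern is the point):
+//
+//	func init() {
+//		encoding.RegisterCompressor(&snappyCompressor{}) // Name() returns "snappy"
+//	}
+//
+// after which callers opt in with grpc.UseCompressor("snappy").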
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. +func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } + + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err + } + + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. 
+ if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 7. +// +// Older versions are kept for compatibility. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 00000000..f4dde72b --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1980 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" +) + +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + extraServerOptions = append(extraServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + extraServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption +} + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl interface{} + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata interface{} +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. 
+ conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID *channelz.Identifier + czData *channelzData + + serverWorkerChannels []chan *serverWorkerData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger + inTapHandle tap.ServerInHandle + statsHandlers []stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} +var extraServerOptions []ServerOption + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. 
+// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < time.Second { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = time.Second + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. 
For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. 
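+//
+// A minimal logging interceptor shows the expected shape (the logging call
+// is illustrative):
+//
+//	func logStream(srv interface{}, ss grpc.ServerStream,
+//		info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+//		log.Printf("stream open: %s", info.FullMethod)
+//		return handler(srv, ss)
+//	}
+//
+// installed via grpc.NewServer(grpc.StreamInterceptor(logStream)).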
+func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. 
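+//
+// Usually set alongside the other per-server limits at construction time,
+// e.g. (the sizes here are arbitrary, for illustration only):
+//
+//	grpc.NewServer(grpc.MaxHeaderListSize(1<<20), grpc.MaxRecvMsgSize(16<<20))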
+func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. 
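+//
+// Typical lifecycle, for illustration (the generated RegisterGreeterServer
+// helper and the listen address are assumptions of the sketch):
+//
+//	s := grpc.NewServer()
+//	pb.RegisterGreeterServer(s, &greeterServer{})
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		log.Fatalf("listen: %v", err)
+//	}
+//	if err := s.Serve(lis); err != nil {
+//		log.Fatalf("serve: %v", err)
+//	}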
+func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl interface{}) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. 
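+//
+// Values of this shape are surfaced through (*Server).GetServiceInfo below;
+// e.g. a reflection-style dump (sketch):
+//
+//	for svc, info := range s.GetServiceInfo() {
+//		for _, m := range info.Methods {
+//			log.Printf("%s/%s client-stream=%v server-stream=%v",
+//				svc, m.Name, m.IsClientStream, m.IsServerStream)
+//		}
+//	}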
+type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.streams { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +type listenSocket struct { + net.Listener + channelzID *channelz.Identifier +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. 
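+			// s.done fires only after Stop or GracefulStop has finished
+			// tearing down listeners and connections.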
+ <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + + var tempDelay time.Duration // how long to sleep on accept failure + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(lis.Addr().String(), rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) + if st == nil { + return + } + + if !s.addConn(lisAddr, st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(lisAddr, st) + }() +} + +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, + InTapHandle: s.opts.inTapHandle, + StatsHandlers: s.opts.statsHandlers, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport(c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
+ if err != io.EOF { + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + + var roundRobinCounter uint32 + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { + return + } + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. 
+ st.Drain() + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true + return true +} + +func (s *Server) removeConn(addr string, st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainUnaryInterceptors(interceptors) + } + + s.opts.unaryInt = chainedInt +} + +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. 
This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) + } + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) + } + return state.next(ctx, req) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + for _, sh := range shs { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. + defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + for _, sh := range shs { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range binlogs { + binlog.Log(logEntry) + } + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. 
Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength + headerLen, + Data: d, + Length: len(d), + }) + } + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: d, + } + for _, binlog := range binlogs { + binlog.Log(cm) + } + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if len(binlogs) != 0 { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + sh := &binarylog.ServerHeader{ + Header: h, + } + for _, binlog := range binlogs { + binlog.Log(sh) + } + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(st) + } + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). 
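+			// No status is written in this case: io.EOF from sendResponse
+			// means the stream already ended, so just surface the error.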
+ return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(st) + } + } + return err + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + sm := &binarylog.ServerMessage{ + Message: reply, + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(sm) + } + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + err = t.WriteStatus(stream, statusOK) + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(st) + } + } + return err +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainStreamInterceptors(interceptors) + } + + s.opts.streamInt = chainedInt +} + +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. 
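+		// state.next invokes the interceptors in order, passing itself as
+		// the next handler; the final interceptor receives the real service
+		// handler.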
+ var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) + } + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) + } + return state.next(srv, ss) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + shs := s.opts.statsHandlers + var statsBegin *stats.Begin + if len(shs) != 0 { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: shs, + } + + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if len(shs) != 0 { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range ss.binlogs { + binlog.Log(logEntry) + } + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. 
+ if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server interface{} + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } + } + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + err = t.WriteStatus(ss.s, statusOK) + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } + } + return err +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + } + // Unknown service, or known server unknown method. 
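+	// If an UnknownServiceHandler ServerOption was installed, let it serve
+	// the call as a streaming RPC; otherwise reply with codes.Unimplemented.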
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + conns := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for _, cs := range conns { + for st := range cs { + st.Close() + } + } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. 
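+//
+// A common shutdown pattern (sketch; the signal wiring is illustrative):
+//
+//	go func() {
+//		<-shutdownCh // e.g. closed on SIGTERM
+//		s.GracefulStop()
+//	}()
+//	_ = s.Serve(lis) // returns nil once GracefulStop completes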
+func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain() + } + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. 
However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 00000000..01bbb202 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,406 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig = internalserviceconfig.MethodConfig + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. 
If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. + rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
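+	// For example, "3s" has only a whole part, "0.5s" only a fractional
+	// part, and "3.000000001s" yields 3s + 1ns; a bare "s" is rejected.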
+ hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *internalserviceconfig.BalancerConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfig = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + + paths := map[string]struct{}{} + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } 
else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: *ib, + MaxBackoff: *mb, + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), + } + if rp.MaxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.MaxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.RetryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. 
+ return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 00000000..35e7a20a --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. +type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 00000000..dc03731e --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. 
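+	// A handler typically attaches per-RPC state to the returned context
+	// here and reads it back in HandleRPC.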
+ TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 00000000..0285dcc6 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,319 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC attempt begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC attempt begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. 
+ RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. 
+ WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. 
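+// It reads the value stored by SetTags under outgoingTagsKey.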
+func OutgoingTags(ctx context.Context) []byte {
+ b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
+ return b
+}
+
+type incomingTraceKey struct{}
+type outgoingTraceKey struct{}
+
+// SetTrace attaches stats tagging data to the context, which will be sent in
+// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
+// SetTrace will overwrite the values from earlier calls.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release. New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func SetTrace(ctx context.Context, b []byte) context.Context {
+ return context.WithValue(ctx, outgoingTraceKey{}, b)
+}
+
+// Trace returns the trace from the context for the inbound RPC.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release. New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func Trace(ctx context.Context) []byte {
+ b, _ := ctx.Value(incomingTraceKey{}).([]byte)
+ return b
+}
+
+// SetIncomingTrace attaches stats tagging data to the context, to be read by
+// the application (not sent in outgoing RPCs). It is intended for
+// gRPC-internal use.
+func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
+ return context.WithValue(ctx, incomingTraceKey{}, b)
+}
+
+// OutgoingTrace returns the trace from the context for the outbound RPC. It is
+// intended for gRPC-internal use.
+func OutgoingTrace(ctx context.Context) []byte {
+ b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
+ return b
+}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
new file mode 100644
index 00000000..623be39f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -0,0 +1,135 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC. These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto. gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status

+import (
+ "context"
+ "errors"
+ "fmt"
+
+ spb "google.golang.org/genproto/googleapis/rpc/status"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/internal/status"
+)
+
+// Status references google.golang.org/grpc/internal/status. It represents an
+// RPC status code, message, and details. It is immutable and should be
+// created with New, Newf, or FromProto.
+// https://godoc.org/google.golang.org/grpc/internal/status
+type Status = status.Status
+
+// New returns a Status representing c and msg.
+func New(c codes.Code, msg string) *Status {
+ return status.New(c, msg)
+}
+
+// Newf returns New(c, fmt.Sprintf(format, a...)).
+func Newf(c codes.Code, format string, a ...interface{}) *Status {
+ return New(c, fmt.Sprintf(format, a...))
+}
+
+// Error returns an error representing c and msg. If c is OK, returns nil.
+func Error(c codes.Code, msg string) error {
+ return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+ return Error(c, fmt.Sprintf(format, a...))
+}
+
+// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
+func ErrorProto(s *spb.Status) error {
+ return FromProto(s).Err()
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+ return status.FromProto(s)
+}
+
+// FromError returns a Status representation of err.
+//
+// - If err was produced by this package or implements the method `GRPCStatus()
+// *Status`, the appropriate Status is returned.
+//
+// - If err is nil, a Status is returned with codes.OK and no message.
+//
+// - Otherwise, err is an error not compatible with this package. In this
+// case, a Status is returned with codes.Unknown and err's Error() message,
+// and ok is false.
+func FromError(err error) (s *Status, ok bool) {
+ if err == nil {
+ return nil, true
+ }
+ if se, ok := err.(interface {
+ GRPCStatus() *Status
+ }); ok {
+ return se.GRPCStatus(), true
+ }
+ return New(codes.Unknown, err.Error()), false
+}
+
+// Convert is a convenience function which removes the need to handle the
+// boolean return value from FromError.
+func Convert(err error) *Status {
+ s, _ := FromError(err)
+ return s
+}
+
+// Code returns the Code of the error if it is a Status error, codes.OK if err
+// is nil, or codes.Unknown otherwise.
+func Code(err error) codes.Code {
+ // Don't use FromError to avoid allocation of OK status.
+ if err == nil {
+ return codes.OK
+ }
+ if se, ok := err.(interface {
+ GRPCStatus() *Status
+ }); ok {
+ return se.GRPCStatus().Code()
+ }
+ return codes.Unknown
+}
+
+// FromContextError converts a context error or wrapped context error into a
+// Status. It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
+func FromContextError(err error) *Status {
+ if err == nil {
+ return nil
+ }
+ if errors.Is(err, context.DeadlineExceeded) {
+ return New(codes.DeadlineExceeded, err.Error())
+ }
+ if errors.Is(err, context.Canceled) {
+ return New(codes.Canceled, err.Error())
+ }
+ return New(codes.Unknown, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
new file mode 100644
index 00000000..960c3e33
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -0,0 +1,1718 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. +type StreamDesc struct { + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. 
+ // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. 
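+// It forwards all of its arguments to cc.NewStream unchanged.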
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. 
+ // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + } + + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + return nil, err + } + + if len(cs.binlogs) != 0 { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new csAttempt without a transport or stream. 
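+// The transport and stream are attached afterwards via csAttempt.getTransport
+// and csAttempt.newStream.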
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } + return err + } + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) + } + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(a.ctx, cs.callHdr) + if err != nil { + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) + } + a.s = s + a.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlogs []binarylog.MethodLogger + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. 
+ serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + onCommit func() + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool +} + +func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err + } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } + // Wait for the trailers. + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return true, nil + } + if cs.cc.dopts.disableRetry { + return false, err + } + + pushback := 0 + hasPushback := false + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. 
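+ // The grpc-retry-pushback-ms trailer carries the server's requested retry
+ // delay in milliseconds; a malformed or negative value, or multiple values,
+ // is treated as an instruction not to retry.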
+ sps := a.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + } + + var code codes.Code + if a.s != nil { + code = a.s.Status().Code() + } else { + code = status.Code(err) + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return false, err + } + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return false, nil + case <-cs.ctx.Done(): + t.Stop() + return false, status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { + for { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. + return err + } + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. 
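+ // (On retry, retryLocked replays the buffered ops against the fresh
+ // attempt via replayBufferLocked, so this inline creation only happens
+ // for the very first attempt.)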
+ var err error
+ if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
+ cs.mu.Unlock()
+ cs.finish(err)
+ return err
+ }
+ }
+ a := cs.attempt
+ cs.mu.Unlock()
+ err := op(a)
+ cs.mu.Lock()
+ if a != cs.attempt {
+ // We started another attempt already.
+ continue
+ }
+ if err == io.EOF {
+ <-a.s.Done()
+ }
+ if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+ onSuccess()
+ cs.mu.Unlock()
+ return err
+ }
+ if err := cs.retryLocked(a, err); err != nil {
+ cs.mu.Unlock()
+ return err
+ }
+ }
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+ var m metadata.MD
+ noHeader := false
+ err := cs.withRetry(func(a *csAttempt) error {
+ var err error
+ m, err = a.s.Header()
+ if err == transport.ErrNoHeaders {
+ noHeader = true
+ return nil
+ }
+ return toRPCErr(err)
+ }, cs.commitAttemptLocked)
+
+ if err != nil {
+ cs.finish(err)
+ return nil, err
+ }
+
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
+ // Only log if binary log is on and header has not been logged, and
+ // there are actually headers to log.
+ logEntry := &binarylog.ServerHeader{
+ OnClientSide: true,
+ Header: m,
+ PeerAddr: nil,
+ }
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ cs.serverHeaderBinlogged = true
+ for _, binlog := range cs.binlogs {
+ binlog.Log(logEntry)
+ }
+ }
+ return m, nil
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+ // On RPC failure, we never need to retry, because valid usage requires
+ // that RecvMsg() has returned a non-nil error before this function is
+ // called. We would have retried earlier if necessary.
+ //
+ // Commit the attempt anyway, just in case users are not following those
+ // directions -- it will prevent races and should not meaningfully impact
+ // performance.
+ cs.commitAttempt()
+ if cs.attempt.s == nil {
+ return nil
+ }
+ return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
+ for _, f := range cs.buffer {
+ if err := f(attempt); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+ // Note: we still will buffer if retry is disabled (for transparent retries).
+ if cs.committed {
+ return
+ }
+ cs.bufferSize += sz
+ if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.commitAttemptLocked()
+ return
+ }
+ cs.buffer = append(cs.buffer, op)
+}
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+ defer func() {
+ if err != nil && err != io.EOF {
+ // Call finish on the client stream for errors generated by this SendMsg
+ // call, as these indicate problems created by this client. (Transport
+ // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+ // error will be returned from RecvMsg eventually in that case, or be
+ // retried.)
+ cs.finish(err)
+ }
+ }()
+ if cs.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+ }
+ if !cs.desc.ClientStreams {
+ cs.sentLast = true
+ }
+
+ // load hdr, payload, data
+ hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+ if err != nil {
+ return err
+ }
+
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payload) > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ }
+ op := func(a *csAttempt) error {
+ return a.sendMsg(m, hdr, payload, data)
+ }
+ err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
+ if len(cs.binlogs) != 0 && err == nil {
+ cm := &binarylog.ClientMessage{
+ OnClientSide: true,
+ Message: data,
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cm)
+ }
+ }
+ return err
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) error {
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
+ // Call Header() to binary log header if it's not already logged.
+ cs.Header()
+ }
+ var recvInfo *payloadInfo
+ if len(cs.binlogs) != 0 {
+ recvInfo = &payloadInfo{}
+ }
+ err := cs.withRetry(func(a *csAttempt) error {
+ return a.recvMsg(m, recvInfo)
+ }, cs.commitAttemptLocked)
+ if len(cs.binlogs) != 0 && err == nil {
+ sm := &binarylog.ServerMessage{
+ OnClientSide: true,
+ Message: recvInfo.uncompressedBytes,
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(sm)
+ }
+ }
+ if err != nil || !cs.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ cs.finish(err)
+
+ if len(cs.binlogs) != 0 {
+ // finish will not log Trailer. Log Trailer here.
+ logEntry := &binarylog.ServerTrailer{
+ OnClientSide: true,
+ Trailer: cs.Trailer(),
+ Err: err,
+ }
+ if logEntry.Err == io.EOF {
+ logEntry.Err = nil
+ }
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(logEntry)
+ }
+ }
+ }
+ return err
+}
+
+func (cs *clientStream) CloseSend() error {
+ if cs.sentLast {
+ // TODO: return an error and finish the stream instead, due to API misuse?
+ return nil
+ }
+ cs.sentLast = true
+ op := func(a *csAttempt) error {
+ a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+ // Always return nil; io.EOF is the only error that might make sense
+ // instead, but there is no need to signal the client to call RecvMsg
+ // as the only use left for the stream after CloseSend is to call
+ // RecvMsg. This also matches historical behavior.
+ return nil
+ }
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+ if len(cs.binlogs) != 0 {
+ chc := &binarylog.ClientHalfClose{
+ OnClientSide: true,
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(chc)
+ }
+ }
+ // We never return an error here, for the reasons given above.
+ return nil
+}
+
+func (cs *clientStream) finish(err error) {
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ cs.mu.Lock()
+ if cs.finished {
+ cs.mu.Unlock()
+ return
+ }
+ cs.finished = true
+ cs.commitAttemptLocked()
+ if cs.attempt != nil {
+ cs.attempt.finish(err)
+ // after functions all rely upon having a stream.
+ if cs.attempt.s != nil {
+ for _, o := range cs.opts {
+ o.after(cs.callInfo, cs.attempt)
+ }
+ }
+ }
+ cs.mu.Unlock()
+ // For binary logging, only log cancel in finish (could be caused by RPC ctx
+ // canceled or ClientConn closed). Trailer will be logged in RecvMsg.
+ //
+ // Only one of cancel or trailer needs to be logged. In the cases where
+ // users don't call RecvMsg, users must have already canceled the RPC.
+ if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled {
+ c := &binarylog.Cancel{
+ OnClientSide: true,
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(c)
+ }
+ }
+ if err == nil {
+ cs.retryThrottler.successfulRPC()
+ }
+ if channelz.IsOn() {
+ if err != nil {
+ cs.cc.incrCallsFailed()
+ } else {
+ cs.cc.incrCallsSucceeded()
+ }
+ }
+ cs.cancel()
+}
+
+func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+ cs := a.cs
+ if a.trInfo != nil {
+ a.mu.Lock()
+ if a.trInfo.tr != nil {
+ a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+ }
+ a.mu.Unlock()
+ }
+ if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+ if !cs.desc.ClientStreams {
+ // For non-client-streaming RPCs, we return nil instead of EOF on error
+ // because the generated code requires it. finish is not called; RecvMsg()
+ // will call it with the stream's status independently.
+ return nil
+ }
+ return io.EOF
+ }
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+ }
+ if channelz.IsOn() {
+ a.t.IncrMsgSent()
+ }
+ return nil
+}
+
+func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+ cs := a.cs
+ if len(a.statsHandlers) != 0 && payInfo == nil {
+ payInfo = &payloadInfo{}
+ }
+
+ if !a.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if a.dc == nil || a.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ a.dc = nil
+ a.decomp = encoding.GetCompressor(ct)
+ }
+ } else {
+ // No compression is used; disable our decompressor.
+ a.dc = nil
+ }
+ // Only initialize this state once per stream.
+ a.decompSet = true
+ }
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
+ if err != nil {
+ if err == io.EOF {
+ if statusErr := a.s.Status().Err(); statusErr != nil {
+ return statusErr
+ }
+ return io.EOF // indicates successful end of stream.
+ }
+
+ return toRPCErr(err)
+ }
+ if a.trInfo != nil {
+ a.mu.Lock()
+ if a.trInfo.tr != nil {
+ a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ }
+ a.mu.Unlock()
+ }
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, &stats.InPayload{
+ Client: true,
+ RecvTime: time.Now(),
+ Payload: m,
+ // TODO truncate large payload.
+ Data: payInfo.uncompressedBytes,
+ WireLength: payInfo.wireLength + headerLen,
+ Length: len(payInfo.uncompressedBytes),
+ })
+ }
+ if channelz.IsOn() {
+ a.t.IncrMsgRecv()
+ }
+ if cs.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+ // Special handling for non-server-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ }
+ return toRPCErr(err)
+}
+
+func (a *csAttempt) finish(err error) {
+ a.mu.Lock()
+ if a.finished {
+ a.mu.Unlock()
+ return
+ }
+ a.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ var tr metadata.MD
+ if a.s != nil {
+ a.t.CloseStream(a.s, err)
+ tr = a.s.Trailer()
+ }
+
+ if a.done != nil {
+ br := false
+ if a.s != nil {
+ br = a.s.BytesReceived()
+ }
+ a.done(balancer.DoneInfo{
+ Err: err,
+ Trailer: tr,
+ BytesSent: a.s != nil,
+ BytesReceived: br,
+ ServerLoad: balancerload.Parse(tr),
+ })
+ }
+ for _, sh := range a.statsHandlers {
+ end := &stats.End{
+ Client: true,
+ BeginTime: a.beginTime,
+ EndTime: time.Now(),
+ Trailer: tr,
+ Error: err,
+ }
+ sh.HandleRPC(a.ctx, end)
+ }
+ if a.trInfo != nil && a.trInfo.tr != nil {
+ if err == nil {
+ a.trInfo.tr.LazyPrintf("RPC: [OK]")
+ } else {
+ a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ a.trInfo.tr.SetError()
+ }
+ a.trInfo.tr.Finish()
+ a.trInfo.tr = nil
+ }
+ a.mu.Unlock()
+}
+
+// newNonRetryClientStream creates a ClientStream with the specified transport,
+// on the given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid race, transport is specified separately, instead
+// of using ac.transport.
+//
+// Main difference between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+ if t == nil {
+ // TODO: return RPC error here?
+ return nil, errors.New("transport provided is nil")
+ }
+ // defaultCallInfo contains unnecessary info (i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+ c := &callInfo{}
+
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ ctx, cancel := context.WithCancel(ctx)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
+
+ for _, o := range opts {
+ if err := o.before(c); err != nil {
+ return nil, toRPCErr(err)
+ }
+ }
+ c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
+ }
+
+ callHdr := &transport.CallHdr{
+ Host: ac.cc.authority,
+ Method: method,
+ ContentSubtype: c.contentSubtype,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
+ var cp Compressor
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" {
+ callHdr.SendCompress = ct
+ if ct != encoding.Identity {
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+ }
+ }
+ } else if ac.cc.dopts.cp != nil {
+ callHdr.SendCompress = ac.cc.dopts.cp.Type()
+ cp = ac.cc.dopts.cp
+ }
+ if c.creds != nil {
+ callHdr.Creds = c.creds
+ }
+
+ // Use a special addrConnStream to avoid retry.
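+ // It satisfies ClientStream but carries none of the retry, tracing, or
+ // stats machinery of clientStream.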
+ as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. 
+ return nil
+ }
+ return io.EOF
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgSent()
+ }
+ return nil
+}
+
+func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+ defer func() {
+ if err != nil || !as.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ as.finish(err)
+ }
+ }()
+
+ if !as.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.dc == nil || as.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ as.dc = nil
+ as.decomp = encoding.GetCompressor(ct)
+ }
+ } else {
+ // No compression is used; disable our decompressor.
+ as.dc = nil
+ }
+ // Only initialize this state once per stream.
+ as.decompSet = true
+ }
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err != nil {
+ if err == io.EOF {
+ if statusErr := as.s.Status().Err(); statusErr != nil {
+ return statusErr
+ }
+ return io.EOF // indicates successful end of stream.
+ }
+ return toRPCErr(err)
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgRecv()
+ }
+ if as.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+
+ // Special handling for non-server-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ }
+ return toRPCErr(err)
+}
+
+func (as *addrConnStream) finish(err error) {
+ as.mu.Lock()
+ if as.finished {
+ as.mu.Unlock()
+ return
+ }
+ as.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ if as.s != nil {
+ as.t.CloseStream(as.s, err)
+ }
+
+ if err != nil {
+ as.ac.incrCallsFailed()
+ } else {
+ as.ac.incrCallsSucceeded()
+ }
+ as.cancel()
+ as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
+type ServerStream interface {
+ // SetHeader sets the header metadata. It may be called multiple times.
+ // When called multiple times, all the provided metadata will be merged.
+ // All the metadata will be sent out when one of the following happens:
+ // - ServerStream.SendHeader() is called;
+ // - The first response is sent out;
+ // - An RPC status is sent out (error or success).
+ SetHeader(metadata.MD) error
+ // SendHeader sends the header metadata.
+ // The provided md and headers set by SetHeader() will be sent.
+ // It fails if called multiple times.
+ SendHeader(metadata.MD) error
+ // SetTrailer sets the trailer metadata which will be sent with the RPC status.
+ // When called more than once, all the provided metadata will be merged.
+ SetTrailer(metadata.MD)
+ // Context returns the context for this stream.
+ Context() context.Context
+ // SendMsg sends a message. On error, SendMsg aborts the stream and the
+ // error is returned directly.
+ // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler []stats.Handler + + binlogs []binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. +} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. 
User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if len(ss.binlogs) != 0 { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } + } + sm := &binarylog.ServerMessage{ + Message: data, + } + for _, binlog := range ss.binlogs { + binlog.Log(sm) + } + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(chc) + } + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } + } + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + } + for _, binlog := range ss.binlogs { + binlog.Log(cm) + } + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". 
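Editorial note, not part of the vendored file: the helper defined just below is typically called from a stream interceptor, since generated handlers already know their own method name. A minimal sketch using only exported grpc APIs (the interceptor name logStream is hypothetical):

    package main

    import (
        "log"

        "google.golang.org/grpc"
    )

    // logStream is a hypothetical StreamServerInterceptor that logs the full
    // method name ("/service/method") of every streaming RPC before handing
    // the ServerStream to the real handler.
    func logStream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
        if m, ok := grpc.MethodFromServerStream(ss); ok {
            log.Printf("stream opened: %s", m)
        }
        return handler(srv, ss)
    }

    func main() {
        s := grpc.NewServer(grpc.StreamInterceptor(logStream))
        defer s.Stop()
        // Register services and call s.Serve(lis) in a real program.
    }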
+func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 00000000..bfa5dfa4 --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 00000000..07a2d26b --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 00000000..2198e709 --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.51.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 00000000..bd8e0cdb --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + +die() { + echo "$@" >&2 + exit 1 +} + +fail_on_output() { + tee /dev/stderr | not read +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version + +if [[ "$1" = "-install" ]]; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd + if [[ -z "${VET_SKIP_PROTO}" ]]; then + if [[ "${TRAVIS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/runner/go + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif not which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# - Ensure all source files contain a copyright message. +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go + +# - Do not import x/net/context. +not git grep -l 'x/net/context' -- "*.go" + +# - Do not import math/rand for real library code. Use internal/grpcrand for +# thread safety. +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not call grpclog directly. Use grpclog.Component instead. 
+git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + +# - Ensure all ptypes proto packages are renamed when importing. +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +misspell -error . + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + + go mod tidy + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) + popd +done + +# - Collection of static analysis checks +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. +SC_OUT="$(mktemp)" +staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" +# Only ignore the following deprecated types/fields/functions. +not grep -Fv '.CredsBundle +.HeaderMap +.Metadata is deprecated: use Attributes +.NewAddress +.NewServiceConfig +.Type is deprecated: use Attributes +BuildVersion is deprecated +balancer.ErrTransientFailure +balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.CustomCodec +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.ServiceConfig +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +info.SecurityVersion +proto is deprecated +proto.InternalMessageInfo is deprecated +proto.EnumName is deprecated +proto.ErrInternalBadWireType is deprecated +proto.FileDescriptor is deprecated +proto.Marshaler is deprecated +proto.MessageType is deprecated +proto.RegisterEnum is deprecated +proto.RegisterFile is deprecated +proto.RegisterType is deprecated +proto.RegisterExtension is deprecated +proto.RegisteredExtension is deprecated +proto.RegisteredExtensions is deprecated +proto.RegisterMapType is deprecated +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB +Target is deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" + +# - special golint on package comments. +lint_package_comment_per_package() { + # Number of files in this go package. + fileCount=$(go list -f '{{len .GoFiles}}' $1) + if [ ${fileCount} -eq 0 ]; then + return 0 + fi + # Number of package errors generated by golint. 
+ lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") + # golint complains about every file that's missing the package comment. If the + # number of files for this package is greater than the number of errors, there's + # at least one file with package comment, good. Otherwise, fail. + if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then + echo "Package $1 (with ${fileCount} files) is missing package comment" + return 1 + fi +} +lint_package_comment() { + set +ex + + count=0 + for i in $(go list ./...); do + lint_package_comment_per_package "$i" + ((count += $?)) + done + + set -ex + return $count +} +lint_package_comment + +echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 00000000..5f28148d --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,665 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. 
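Editorial note: from the caller's side, the entry points above behave as sketched below; structpb is used only to have a concrete proto.Message on hand, and the field names in the JSON input are illustrative:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        var s structpb.Struct
        in := []byte(`{"service": "checker", "retries": 3}`)

        // Default options: the message is reset first, and any parse error
        // aborts (the message may then be partially set).
        if err := protojson.Unmarshal(in, &s); err != nil {
            panic(err)
        }
        fmt.Println(s.Fields["service"].GetStringValue()) // checker

        // DiscardUnknown matters for messages with a fixed schema: unknown
        // JSON members are skipped instead of raising an "unknown field" error.
        opts := protojson.UnmarshalOptions{DiscardUnknown: true}
        _ = opts.Unmarshal(in, &s)
    }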
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. 
+ fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. 
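Editorial note: one visible consequence of the scalar decoder defined next, per the proto3 JSON mapping, is that 64-bit integers are accepted both as JSON numbers and as quoted strings. A minimal sketch using the Int64Value wrapper, which routes through this scalar path:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        var v wrapperspb.Int64Value

        // A quoted 64-bit integer is accepted...
        if err := protojson.Unmarshal([]byte(`"9007199254740993"`), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.Value) // 9007199254740993

        // ...and so is a bare JSON number.
        if err := protojson.Unmarshal([]byte(`42`), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.Value) // 42
    }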
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(val) + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + + mmap.Set(pkey, pval) + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
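Editorial note: the duplicate-key check in unmarshalMap above is observable through any map-valued message; a sketch using Struct, whose fields member is a map<string, Value>:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        var s structpb.Struct
        // Map keys are always JSON object member names, i.e. strings; for
        // integer-keyed maps they would be parsed by unmarshalMapKey below.
        err := protojson.Unmarshal([]byte(`{"a": 1, "a": 2}`), &s)
        fmt.Println(err) // reports a duplicate map key error with position info
    }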
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 00000000..21d5d2cb --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard "encoding/json" +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 00000000..66b95870 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,349 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Format(m proto.Message) string {
+	return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given proto.Message in JSON format using default options.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Marshal(m proto.Message) ([]byte, error) {
+	return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable JSON format marshaler.
+type MarshalOptions struct {
+	pragma.NoUnkeyedLiterals
+
+	// Multiline specifies whether the marshaler should format the output in
+	// indented-form with every textual element on a new line.
+	// If Indent is an empty string, then an arbitrary indent is chosen.
+	Multiline bool
+
+	// Indent specifies the set of indentation characters to use in a multiline
+	// formatted output such that every entry is preceded by Indent and
+	// terminated by a newline. If non-empty, then Multiline is treated as true.
+	// Indent can only be composed of space or tab characters.
+	Indent string
+
+	// AllowPartial allows messages that have missing required fields to marshal
+	// without returning an error. If AllowPartial is false (the default),
+	// Marshal will return an error if there are any missing required fields.
+	AllowPartial bool
+
+	// UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
+	// field names.
+	UseProtoNames bool
+
+	// UseEnumNumbers emits enum values as numbers.
+	UseEnumNumbers bool
+
+	// EmitUnpopulated specifies whether to emit unpopulated fields. It does not
+	// emit unpopulated oneof fields or unpopulated extension fields.
+	// The JSON values emitted for unpopulated fields are as follows:
+	//  ╔═══════╤════════════════════════════╗
+	//  ║ JSON  │ Protobuf field             ║
+	//  ╠═══════╪════════════════════════════╣
+	//  ║ false │ proto3 boolean fields      ║
+	//  ║ 0     │ proto3 numeric fields      ║
+	//  ║ ""    │ proto3 string/bytes fields ║
+	//  ║ null  │ proto2 scalar fields       ║
+	//  ║ null  │ message fields             ║
+	//  ║ []    │ list fields                ║
+	//  ║ {}    │ map fields                 ║
+	//  ╚═══════╧════════════════════════════╝
+	EmitUnpopulated bool
+
+	// Resolver is used for looking up types when expanding google.protobuf.Any
+	// messages. If nil, this defaults to using protoregistry.GlobalTypes.
+	Resolver interface {
+		protoregistry.ExtensionTypeResolver
+		protoregistry.MessageTypeResolver
+	}
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func (o MarshalOptions) Format(m proto.Message) string {
+	if m == nil || !m.ProtoReflect().IsValid() {
+		return "" // invalid syntax, but okay since this is for debugging
+	}
+	o.AllowPartial = true
+	b, _ := o.Marshal(m)
+	return string(b)
+}
+
+// Marshal marshals the given proto.Message in the JSON format using options in
+// MarshalOptions. Do not depend on the output being stable. It may change over
+// time across different versions of the program.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+	return o.marshal(nil, m)
+}
+
+// MarshalAppend appends the JSON format encoding of m to b,
+// returning the result.
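Editorial note: a short sketch of the options above in use; the printed output is indicative only, since the package explicitly does not promise stable output:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        s, err := structpb.NewStruct(map[string]interface{}{
            "enabled": true,
            "retries": 3,
        })
        if err != nil {
            panic(err)
        }

        // Multiline with a two-space indent; Indent alone would also imply
        // Multiline per the option docs above.
        opts := protojson.MarshalOptions{Multiline: true, Indent: "  "}
        b, err := opts.Marshal(s)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // {"enabled": true, "retries": 3}, pretty-printed
    }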
+func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
+	return o.marshal(b, m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
+	if o.Multiline && o.Indent == "" {
+		o.Indent = defaultIndent
+	}
+	if o.Resolver == nil {
+		o.Resolver = protoregistry.GlobalTypes
+	}
+
+	internalEnc, err := json.NewEncoder(b, o.Indent)
+	if err != nil {
+		return nil, err
+	}
+
+	// Treat nil message interface as an empty message,
+	// in which case the output is an empty JSON object.
+	if m == nil {
+		return append(b, '{', '}'), nil
+	}
+
+	enc := encoder{internalEnc, o}
+	if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
+		return nil, err
+	}
+	if o.AllowPartial {
+		return enc.Bytes(), nil
+	}
+	return enc.Bytes(), proto.CheckInitialized(m)
+}
+
+type encoder struct {
+	*json.Encoder
+	opts MarshalOptions
+}
+
+// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
+var typeFieldDesc = func() protoreflect.FieldDescriptor {
+	var fd filedesc.Field
+	fd.L0.FullName = "@type"
+	fd.L0.Index = -1
+	fd.L1.Cardinality = protoreflect.Optional
+	fd.L1.Kind = protoreflect.StringKind
+	return &fd
+}()
+
+// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
+// to additionally iterate over a synthetic field for the type URL.
+type typeURLFieldRanger struct {
+	order.FieldRanger
+	typeURL string
+}
+
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
+		return
+	}
+	m.FieldRanger.Range(f)
+}
+
+// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
+// method to additionally iterate over unpopulated fields.
+type unpopulatedFieldRanger struct{ protoreflect.Message }
+
+func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	fds := m.Descriptor().Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		if m.Has(fd) || fd.ContainingOneof() != nil {
+			continue // ignore populated fields and fields within a oneof
+		}
+
+		v := m.Get(fd)
+		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
+		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
+		if isProto2Scalar || isSingularMessage {
+			v = protoreflect.Value{} // use invalid value to emit null
+		}
+		if !f(fd, v) {
+			return
+		}
+	}
+	m.Message.Range(f)
+}
+
+// marshalMessage marshals the fields in the given protoreflect.Message.
+// If the typeURL is non-empty, then a synthetic "@type" field is injected
+// containing the URL as the value.
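Editorial note: the typeURL parameter exists for Any, whose marshaler injects the synthetic "@type" member described above. A sketch; anypb and wrapperspb are used simply as convenient well-known types:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        a, err := anypb.New(wrapperspb.String("hello"))
        if err != nil {
            panic(err)
        }
        b, err := protojson.Marshal(a)
        if err != nil {
            panic(err)
        }
        // StringValue has a custom JSON form, so the payload lands in "value":
        // {"@type":"type.googleapis.com/google.protobuf.StringValue","value":"hello"}
        fmt.Println(string(b))
    }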
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + if e.opts.EmitUnpopulated { + fields = unpopulatedFieldRanger{m} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + e.WriteUint(val.Uint()) + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. 
+func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 00000000..6c37d417 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,895 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, protoreflect.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
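Editorial note: Duration and Timestamp are the most visible of these special cases, serializing to single JSON strings rather than objects. A sketch; the exact fractional-digit formatting shown is indicative, as the package makes no output-stability promise:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/durationpb"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        d, _ := protojson.Marshal(durationpb.New(1500 * time.Millisecond))
        fmt.Println(string(d)) // e.g. "1.500s"

        ts, _ := protojson.Marshal(timestamppb.New(time.Unix(0, 0).UTC()))
        fmt.Println(string(ts)) // "1970-01-01T00:00:00Z"
    }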
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. + typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. 
+ start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. + Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. 
+ tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for objects and arrays. + switch tok.Kind() { + case json.ObjectOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + case json.Name: + // Skip object field value. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + + case json.ArrayOpen: + for { + tok, err := d.Peek() + if err != nil { + return err + } + switch tok.Kind() { + case json.ArrayClose: + d.Read() + return nil + default: + // Skip array item. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + } + return nil +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. + d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. 
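// A runnable sketch of the Any JSON forms implemented above, written against
// the public protojson package (the exported counterpart of this vendored
// code path). The embedded Duration and the Int32 wrapper are arbitrary
// illustration choices.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Duration has custom JSON, so inside an Any it is nested under "value":
	// {"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.500s"}
	// A message without custom JSON would have its fields inlined next to "@type".
	a, err := anypb.New(durationpb.New(1500 * time.Millisecond))
	if err != nil {
		panic(err)
	}
	b, _ := protojson.Marshal(a)
	fmt.Println(string(b))

	// Wrapper types marshal to the bare JSON primitive, here: 42
	w, _ := protojson.Marshal(wrapperspb.Int32(42))
	fmt.Println(string(w))
}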
+ +func (e encoder) marshalEmpty(protoreflect.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
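// A short sketch of the Struct/ListValue/Value mapping above via the public
// structpb and protojson packages; the keys and values are made up.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s, err := structpb.NewStruct(map[string]interface{}{
		"name":    "dialtesting",                  // Value.string_value
		"retries": 3,                              // Value.number_value (all JSON numbers are doubles)
		"tags":    []interface{}{"grpc", "json"},  // Value.list_value
		"extra":   nil,                            // Value.null_value
	})
	if err != nil {
		panic(err)
	}
	b, _ := protojson.Marshal(s)
	fmt.Println(string(b)) // e.g. {"extra":null,"name":"dialtesting","retries":3,"tags":["grpc","json"]}
}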
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
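// The formatting and sign rules above, observed through the public
// durationpb and protojson packages; the sample durations are arbitrary.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	for _, d := range []time.Duration{
		time.Second,                   // "1s"           (all nine fractional zeros trimmed)
		1500 * time.Millisecond,       // "1.500s"       (trimmed in groups of three)
		-1500 * time.Millisecond,      // "-1.500s"      (seconds and nanos share the sign)
		time.Second + time.Nanosecond, // "1.000000001s" (full nanosecond precision kept)
	} {
		b, _ := protojson.Marshal(durationpb.New(d))
		fmt.Println(string(b))
	}
}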
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
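	// Note: time.Parse above is more lenient than this JSON mapping and can
	// accept a fractional-second field longer than nine digits, so the raw
	// string is re-checked here: i marks the '.', j marks the start of the
	// zone suffix, and any fraction longer than ".999999999" is rejected
	// even though it parsed.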
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 00000000..d043a6eb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. 
+ lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
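// The Decoder above lives in an internal package and cannot be imported by
// user code. As a rough stand-in, the standard library's token decoder shows
// the same token-walking pattern; semantics differ (for example, Read above
// validates token sequencing itself and never surfaces commas).
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"a": [1, true, "x"]}`))
	for {
		tok, err := dec.Token()
		if err != nil { // io.EOF once the input is exhausted
			break
		}
		fmt.Printf("%T %v\n", tok, tok) // json.Delim for {, [, ], }; scalars otherwise
	}
}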
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-length of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 00000000..2999d713 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
+ if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. 
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 00000000..f7fea7d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
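// The \uXXXX branch of parseString above pairs UTF-16 surrogates with
// utf16.DecodeRune; a self-contained sketch of just that step, using only
// the standard library and a hard-coded surrogate pair.
package main

import (
	"fmt"
	"strconv"
	"unicode/utf16"
)

func main() {
	// `\ud83d\ude00` is the JSON escape sequence for U+1F600.
	hi, _ := strconv.ParseUint("d83d", 16, 16)
	lo, _ := strconv.ParseUint("de00", 16, 16)
	r := utf16.DecodeRune(rune(hi), rune(lo))
	fmt.Printf("%c %U\n", r, r) // 😀 U+1F600
}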
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 00000000..50578d65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. + comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. 
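// Worked examples for the integer normalization in decode_number.go
// (parseNumberParts + normalizeToIntString), which the Int and Uint
// accessors below rely on; results hand-derived from that code:
//
//	"1e3"    -> "1000"         (positive exponent pads zeros on the right)
//	"1.50e2" -> "150"          (trailing zeros of the fraction trim away first)
//	"100e-2" -> "1"            (negative exponent may only shift zeros out)
//	"1.5"    -> not an integer (fraction survives the exponent)
//	"101e-2" -> not an integer (a non-zero digit would cross the decimal point)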
+func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. +func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 00000000..934f2dcb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. 
+func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. 
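// The Encoder in this file is also internal and not importable; this sketch
// only illustrates the intended call order of the API declared in this file,
// as if it lived inside the package itself. The name exampleEncode is
// hypothetical.
func exampleEncode() ([]byte, error) {
	e, err := NewEncoder(nil, "") // nil buffer starts empty; "" disables indentation
	if err != nil {
		return nil, err
	}
	e.StartObject()
	if err := e.WriteName("name"); err != nil {
		return nil, err
	}
	if err := e.WriteString("grpc"); err != nil {
		return nil, err
	}
	if err := e.WriteName("retries"); err != nil {
		return nil, err
	}
	e.WriteInt(3)
	e.EndObject()
	return e.Bytes(), nil // {"name":"grpc","retries":3}, modulo deliberate random spacing
}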
+func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go new file mode 100644 index 00000000..f77ef0de --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -0,0 +1,717 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dynamicpb creates protocol buffer messages using runtime type information. +package dynamicpb + +import ( + "math" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// enum is a dynamic protoreflect.Enum. 
+type enum struct { + num protoreflect.EnumNumber + typ protoreflect.EnumType +} + +func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() } +func (e enum) Type() protoreflect.EnumType { return e.typ } +func (e enum) Number() protoreflect.EnumNumber { return e.num } + +// enumType is a dynamic protoreflect.EnumType. +type enumType struct { + desc protoreflect.EnumDescriptor +} + +// NewEnumType creates a new EnumType with the provided descriptor. +// +// EnumTypes created by this package are equal if their descriptors are equal. +// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2). +// +// Enum values created by the EnumType are equal if their numbers are equal. +func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType { + return enumType{desc} +} + +func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} } +func (et enumType) Descriptor() protoreflect.EnumDescriptor { return et.desc } + +// extensionType is a dynamic protoreflect.ExtensionType. +type extensionType struct { + desc extensionTypeDescriptor +} + +// A Message is a dynamically constructed protocol buffer message. +// +// Message implements the proto.Message interface, and may be used with all +// standard proto package functions such as Marshal, Unmarshal, and so forth. +// +// Message also implements the protoreflect.Message interface. See the protoreflect +// package documentation for that interface for how to get and set fields and +// otherwise interact with the contents of a Message. +// +// Reflection API functions which construct messages, such as NewField, +// return new dynamic messages of the appropriate type. Functions which take +// messages, such as Set for a message-value field, will accept any message +// with a compatible type. +// +// Operations which modify a Message are not safe for concurrent use. +type Message struct { + typ messageType + known map[protoreflect.FieldNumber]protoreflect.Value + ext map[protoreflect.FieldNumber]protoreflect.FieldDescriptor + unknown protoreflect.RawFields +} + +var ( + _ protoreflect.Message = (*Message)(nil) + _ protoreflect.ProtoMessage = (*Message)(nil) + _ protoiface.MessageV1 = (*Message)(nil) +) + +// NewMessage creates a new message with the provided descriptor. +func NewMessage(desc protoreflect.MessageDescriptor) *Message { + return &Message{ + typ: messageType{desc}, + known: make(map[protoreflect.FieldNumber]protoreflect.Value), + ext: make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor), + } +} + +// ProtoMessage implements the legacy message interface. +func (m *Message) ProtoMessage() {} + +// ProtoReflect implements the protoreflect.ProtoMessage interface. +func (m *Message) ProtoReflect() protoreflect.Message { + return m +} + +// String returns a string representation of a message. +func (m *Message) String() string { + return protoimpl.X.MessageStringOf(m) +} + +// Reset clears the message to be empty, but preserves the dynamic message type. +func (m *Message) Reset() { + m.known = make(map[protoreflect.FieldNumber]protoreflect.Value) + m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor) + m.unknown = nil +} + +// Descriptor returns the message descriptor. +func (m *Message) Descriptor() protoreflect.MessageDescriptor { + return m.typ.desc +} + +// Type returns the message type. +func (m *Message) Type() protoreflect.MessageType { + return m.typ +} + +// New returns a newly allocated empty message with the same descriptor. 
+// See protoreflect.Message for details. +func (m *Message) New() protoreflect.Message { + return m.Type().New() +} + +// Interface returns the message. +// See protoreflect.Message for details. +func (m *Message) Interface() protoreflect.ProtoMessage { + return m +} + +// ProtoMethods is an internal detail of the protoreflect.Message interface. +// Users should never call this directly. +func (m *Message) ProtoMethods() *protoiface.Methods { + return nil +} + +// Range visits every populated field in undefined order. +// See protoreflect.Message for details. +func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + for num, v := range m.known { + fd := m.ext[num] + if fd == nil { + fd = m.Descriptor().Fields().ByNumber(num) + } + if !isSet(fd, v) { + continue + } + if !f(fd, v) { + return + } + } +} + +// Has reports whether a field is populated. +// See protoreflect.Message for details. +func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { + m.checkField(fd) + if fd.IsExtension() && m.ext[fd.Number()] != fd { + return false + } + v, ok := m.known[fd.Number()] + if !ok { + return false + } + return isSet(fd, v) +} + +// Clear clears a field. +// See protoreflect.Message for details. +func (m *Message) Clear(fd protoreflect.FieldDescriptor) { + m.checkField(fd) + num := fd.Number() + delete(m.known, num) + delete(m.ext, num) +} + +// Get returns the value of a field. +// See protoreflect.Message for details. +func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + num := fd.Number() + if fd.IsExtension() { + if fd != m.ext[num] { + return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero() + } + return m.known[num] + } + if v, ok := m.known[num]; ok { + switch { + case fd.IsMap(): + if v.Map().Len() > 0 { + return v + } + case fd.IsList(): + if v.List().Len() > 0 { + return v + } + default: + return v + } + } + switch { + case fd.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: fd}) + case fd.IsList(): + return protoreflect.ValueOfList(emptyList{desc: fd}) + case fd.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}}) + case fd.Kind() == protoreflect.BytesKind: + return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...)) + default: + return fd.Default() + } +} + +// Mutable returns a mutable reference to a repeated, map, or message field. +// See protoreflect.Message for details. +func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + if !fd.IsMap() && !fd.IsList() && fd.Message() == nil { + panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName())) + } + if m.known == nil { + panic(errors.New("%v: modification of read-only message", fd.FullName())) + } + num := fd.Number() + if fd.IsExtension() { + if fd != m.ext[num] { + m.ext[num] = fd + m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New() + } + return m.known[num] + } + if v, ok := m.known[num]; ok { + return v + } + m.clearOtherOneofFields(fd) + m.known[num] = m.NewField(fd) + if fd.IsExtension() { + m.ext[num] = fd + } + return m.known[num] +} + +// Set stores a value in a field. +// See protoreflect.Message for details. 
+func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.checkField(fd) + if m.known == nil { + panic(errors.New("%v: modification of read-only message", fd.FullName())) + } + if fd.IsExtension() { + isValid := true + switch { + case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v): + isValid = false + case fd.IsList(): + isValid = v.List().IsValid() + case fd.IsMap(): + isValid = v.Map().IsValid() + case fd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())) + } + m.ext[fd.Number()] = fd + } else { + typecheck(fd, v) + } + m.clearOtherOneofFields(fd) + m.known[fd.Number()] = v +} + +func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) { + od := fd.ContainingOneof() + if od == nil { + return + } + num := fd.Number() + for i := 0; i < od.Fields().Len(); i++ { + if n := od.Fields().Get(i).Number(); n != num { + delete(m.known, n) + } + } +} + +// NewField returns a new value for assignable to the field of a given descriptor. +// See protoreflect.Message for details. +func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + switch { + case fd.IsExtension(): + return fd.(protoreflect.ExtensionTypeDescriptor).Type().New() + case fd.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{ + desc: fd, + mapv: make(map[interface{}]protoreflect.Value), + }) + case fd.IsList(): + return protoreflect.ValueOfList(&dynamicList{desc: fd}) + case fd.Message() != nil: + return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) + default: + return fd.Default() + } +} + +// WhichOneof reports which field in a oneof is populated, returning nil if none are populated. +// See protoreflect.Message for details. +func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + for i := 0; i < od.Fields().Len(); i++ { + fd := od.Fields().Get(i) + if m.Has(fd) { + return fd + } + } + return nil +} + +// GetUnknown returns the raw unknown fields. +// See protoreflect.Message for details. +func (m *Message) GetUnknown() protoreflect.RawFields { + return m.unknown +} + +// SetUnknown sets the raw unknown fields. +// See protoreflect.Message for details. +func (m *Message) SetUnknown(r protoreflect.RawFields) { + if m.known == nil { + panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName())) + } + m.unknown = r +} + +// IsValid reports whether the message is valid. +// See protoreflect.Message for details. +func (m *Message) IsValid() bool { + return m.known != nil +} + +func (m *Message) checkField(fd protoreflect.FieldDescriptor) { + if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() { + if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok { + panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName())) + } + return + } + if fd.Parent() == m.Descriptor() { + return + } + fields := m.Descriptor().Fields() + index := fd.Index() + if index >= fields.Len() || fields.Get(index) != fd { + panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName())) + } +} + +type messageType struct { + desc protoreflect.MessageDescriptor +} + +// NewMessageType creates a new MessageType with the provided descriptor. +// +// MessageTypes created by this package are equal if their descriptors are equal. 
+// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2). +func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType { + return messageType{desc} +} + +func (mt messageType) New() protoreflect.Message { return NewMessage(mt.desc) } +func (mt messageType) Zero() protoreflect.Message { return &Message{typ: messageType{mt.desc}} } +func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc } +func (mt messageType) Enum(i int) protoreflect.EnumType { + if ed := mt.desc.Fields().Get(i).Enum(); ed != nil { + return NewEnumType(ed) + } + return nil +} +func (mt messageType) Message(i int) protoreflect.MessageType { + if md := mt.desc.Fields().Get(i).Message(); md != nil { + return NewMessageType(md) + } + return nil +} + +type emptyList struct { + desc protoreflect.FieldDescriptor +} + +func (x emptyList) Len() int { return 0 } +func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) } +func (x emptyList) Set(n int, v protoreflect.Value) { + panic(errors.New("modification of immutable list")) +} +func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) } +func (x emptyList) AppendMutable() protoreflect.Value { + panic(errors.New("modification of immutable list")) +} +func (x emptyList) Truncate(n int) { panic(errors.New("modification of immutable list")) } +func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) } +func (x emptyList) IsValid() bool { return false } + +type dynamicList struct { + desc protoreflect.FieldDescriptor + list []protoreflect.Value +} + +func (x *dynamicList) Len() int { + return len(x.list) +} + +func (x *dynamicList) Get(n int) protoreflect.Value { + return x.list[n] +} + +func (x *dynamicList) Set(n int, v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list[n] = v +} + +func (x *dynamicList) Append(v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list = append(x.list, v) +} + +func (x *dynamicList) AppendMutable() protoreflect.Value { + if x.desc.Message() == nil { + panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName())) + } + v := x.NewElement() + x.Append(v) + return v +} + +func (x *dynamicList) Truncate(n int) { + // Zero truncated elements to avoid keeping data live. 
+ for i := n; i < len(x.list); i++ { + x.list[i] = protoreflect.Value{} + } + x.list = x.list[:n] +} + +func (x *dynamicList) NewElement() protoreflect.Value { + return newListEntry(x.desc) +} + +func (x *dynamicList) IsValid() bool { + return true +} + +type dynamicMap struct { + desc protoreflect.FieldDescriptor + mapv map[interface{}]protoreflect.Value +} + +func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } +func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) { + typecheckSingular(x.desc.MapKey(), k.Value()) + typecheckSingular(x.desc.MapValue(), v) + x.mapv[k.Interface()] = v +} +func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() } +func (x *dynamicMap) Clear(k protoreflect.MapKey) { delete(x.mapv, k.Interface()) } +func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value { + if x.desc.MapValue().Message() == nil { + panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName())) + } + v := x.Get(k) + if !v.IsValid() { + v = x.NewValue() + x.Set(k, v) + } + return v +} +func (x *dynamicMap) Len() int { return len(x.mapv) } +func (x *dynamicMap) NewValue() protoreflect.Value { + if md := x.desc.MapValue().Message(); md != nil { + return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect()) + } + return x.desc.MapValue().Default() +} +func (x *dynamicMap) IsValid() bool { + return x.mapv != nil +} + +func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { + for k, v := range x.mapv { + if !f(protoreflect.ValueOf(k).MapKey(), v) { + return + } + } +} + +func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsMap(): + return v.Map().Len() > 0 + case fd.IsList(): + return v.List().Len() > 0 + case fd.ContainingOneof() != nil: + return true + case fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension(): + switch fd.Kind() { + case protoreflect.BoolKind: + return v.Bool() + case protoreflect.EnumKind: + return v.Enum() != 0 + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + return v.Int() != 0 + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + return v.Uint() != 0 + case protoreflect.FloatKind, protoreflect.DoubleKind: + return v.Float() != 0 || math.Signbit(v.Float()) + case protoreflect.StringKind: + return v.String() != "" + case protoreflect.BytesKind: + return len(v.Bytes()) > 0 + } + } + return true +} + +func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + if err := typeIsValid(fd, v); err != nil { + panic(err) + } +} + +func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + switch { + case !v.IsValid(): + return errors.New("%v: assigning invalid value", fd.FullName()) + case fd.IsMap(): + if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil + case fd.IsList(): + switch list := v.Interface().(type) { + case *dynamicList: + if list.desc == fd && list.IsValid() { + return nil + } + case emptyList: + if list.desc == fd && list.IsValid() { + return nil + } + } + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + default: + return singularTypeIsValid(fd, v) + } +} + +func typecheckSingular(fd 
protoreflect.FieldDescriptor, v protoreflect.Value) { + if err := singularTypeIsValid(fd, v); err != nil { + panic(err) + } +} + +func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + vi := v.Interface() + var ok bool + switch fd.Kind() { + case protoreflect.BoolKind: + _, ok = vi.(bool) + case protoreflect.EnumKind: + // We could check against the valid set of enum values, but do not. + _, ok = vi.(protoreflect.EnumNumber) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + _, ok = vi.(int32) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + _, ok = vi.(uint32) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + _, ok = vi.(int64) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + _, ok = vi.(uint64) + case protoreflect.FloatKind: + _, ok = vi.(float32) + case protoreflect.DoubleKind: + _, ok = vi.(float64) + case protoreflect.StringKind: + _, ok = vi.(string) + case protoreflect.BytesKind: + _, ok = vi.([]byte) + case protoreflect.MessageKind, protoreflect.GroupKind: + var m protoreflect.Message + m, ok = vi.(protoreflect.Message) + if ok && m.Descriptor().FullName() != fd.Message().FullName() { + return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName()) + } + if dm, ok := vi.(*Message); ok && dm.known == nil { + return errors.New("%v: assigning invalid zero-value message", fd.FullName()) + } + } + if !ok { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil +} + +func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.Kind() { + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.EnumKind: + return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.MessageKind, protoreflect.GroupKind: + return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) + } + panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind())) +} + +// NewExtensionType creates a new ExtensionType with the provided descriptor. +// +// Dynamic ExtensionTypes with the same descriptor compare as equal. That is, +// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2). +// +// The InterfaceOf and ValueOf methods of the extension type are defined as: +// +// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +// return protoreflect.ValueOf(iv) +// } +// +// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +// return v.Interface() +// } +// +// The Go type used by the proto.GetExtension and proto.SetExtension functions +// is determined by these methods, and is therefore equivalent to the Go type +// used to represent a protoreflect.Value. 
See the protoreflect.Value +// documentation for more details. +func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType { + if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok { + desc = xt.Descriptor() + } + return extensionType{extensionTypeDescriptor{desc}} +} + +func (xt extensionType) New() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{ + desc: xt.desc, + mapv: make(map[interface{}]protoreflect.Value), + }) + case xt.desc.IsList(): + return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message())) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) Zero() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc}) + case xt.desc.Cardinality() == protoreflect.Repeated: + return protoreflect.ValueOfList(emptyList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}}) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + return xt.desc +} + +func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { + v := protoreflect.ValueOf(iv) + typecheck(xt.desc, v) + return v +} + +func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { + typecheck(xt.desc, v) + return v.Interface() +} + +func (xt extensionType) IsValidInterface(iv interface{}) bool { + return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil +} + +func (xt extensionType) IsValidValue(v protoreflect.Value) bool { + return typeIsValid(xt.desc, v) == nil +} + +type extensionTypeDescriptor struct { + protoreflect.ExtensionDescriptor +} + +func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType { + return extensionType{xt} +} + +func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return xt.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go new file mode 100644 index 00000000..5a8010f1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -0,0 +1,177 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicpb + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +type extField struct { + name protoreflect.FullName + number protoreflect.FieldNumber +} + +// A Types is a collection of dynamically constructed descriptors. +// Its methods are safe for concurrent use. +// +// Types implements protoregistry.MessageTypeResolver and protoregistry.ExtensionTypeResolver. +// A Types may be used as a proto.UnmarshalOptions.Resolver. +type Types struct { + files *protoregistry.Files + + extMu sync.Mutex + atomicExtFiles uint64 + extensionsByMessage map[extField]protoreflect.ExtensionDescriptor +} + +// NewTypes creates a new Types registry with the provided files. +// The Files registry is retained, and changes to Files will be reflected in Types. +// It is not safe to concurrently change the Files while calling Types methods. 
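+//
+// A minimal usage sketch; files is a *protoregistry.Files built elsewhere,
+// and "example.v1.Greeting" is a hypothetical message name assumed to be
+// registered in it:
+//
+//	types := NewTypes(files)
+//	mt, err := types.FindMessageByName("example.v1.Greeting")
+//	if err != nil {
+//		// handle protoregistry.NotFound
+//	}
+//	msg := mt.New() // a fresh dynamic message for that descriptor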
+func NewTypes(f *protoregistry.Files) *Types { + return &Types{ + files: f, + } +} + +// FindEnumByName looks up an enum by its full name; +// e.g., "google.protobuf.Field.Kind". +// +// This returns (nil, protoregistry.NotFound) if not found. +func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want enum", descName(d)) + } + return NewEnumType(ed), nil +} + +// FindExtensionByName looks up an extension field by the field's full name. +// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, protoregistry.NotFound) if not found. +func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + xd, ok := d.(protoreflect.ExtensionDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want extension", descName(d)) + } + return NewExtensionType(xd), nil +} + +// FindExtensionByNumber looks up an extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, protoregistry.NotFound) if not found. +func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + // Construct the extension number map lazily, since not every user will need it. + // Update the map if new files are added to the registry. + if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) { + t.updateExtensions() + } + xd := t.extensionsByMessage[extField{message, field}] + if xd == nil { + return nil, protoregistry.NotFound + } + return NewExtensionType(xd), nil +} + +// FindMessageByName looks up a message by its full name; +// e.g. "google.protobuf.Any". +// +// This returns (nil, protoregistry.NotFound) if not found. +func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want message", descName(d)) + } + return NewMessageType(md), nil +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, protoregistry.NotFound) if not found. +func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. 
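+	// For example, the Any type URL "type.googleapis.com/google.protobuf.Duration"
+	// resolves to the message named "google.protobuf.Duration".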
+ message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + return t.FindMessageByName(message) +} + +func (t *Types) updateExtensions() { + t.extMu.Lock() + defer t.extMu.Unlock() + if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) { + return + } + defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles())) + t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { + t.registerExtensions(fd.Extensions()) + t.registerExtensionsInMessages(fd.Messages()) + return true + }) +} + +func (t *Types) registerExtensionsInMessages(mds protoreflect.MessageDescriptors) { + count := mds.Len() + for i := 0; i < count; i++ { + md := mds.Get(i) + t.registerExtensions(md.Extensions()) + t.registerExtensionsInMessages(md.Messages()) + } +} + +func (t *Types) registerExtensions(xds protoreflect.ExtensionDescriptors) { + count := xds.Len() + for i := 0; i < count; i++ { + xd := xds.Get(i) + field := xd.Number() + message := xd.ContainingMessage().FullName() + if t.extensionsByMessage == nil { + t.extensionsByMessage = make(map[extField]protoreflect.ExtensionDescriptor) + } + t.extensionsByMessage[extField{message, field}] = xd + } +} + +func descName(d protoreflect.Descriptor) string { + switch d.(type) { + case protoreflect.EnumDescriptor: + return "enum" + case protoreflect.EnumValueDescriptor: + return "enum value" + case protoreflect.MessageDescriptor: + return "message" + case protoreflect.ExtensionDescriptor: + return "extension" + case protoreflect.ServiceDescriptor: + return "service" + default: + return fmt.Sprintf("%T", d) + } +} diff --git a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go new file mode 100644 index 00000000..335be6eb --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go @@ -0,0 +1,575 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/api.proto + +package apipb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + sourcecontextpb "google.golang.org/protobuf/types/known/sourcecontextpb" + typepb "google.golang.org/protobuf/types/known/typepb" + reflect "reflect" + sync "sync" +) + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*typepb.Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. 
See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service. + Syntax typepb.Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (x *Api) Reset() { + *x = Api{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Api) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Api) ProtoMessage() {} + +func (x *Api) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Api.ProtoReflect.Descriptor instead. +func (*Api) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{0} +} + +func (x *Api) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Api) GetMethods() []*Method { + if x != nil { + return x.Methods + } + return nil +} + +func (x *Api) GetOptions() []*typepb.Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Api) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Api) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Api) GetMixins() []*Mixin { + if x != nil { + return x.Mixins + } + return nil +} + +func (x *Api) GetSyntax() typepb.Syntax { + if x != nil { + return x.Syntax + } + return typepb.Syntax(0) +} + +// Method represents a method of an API interface. +type Method struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*typepb.Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. 
+ Syntax typepb.Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (x *Method) Reset() { + *x = Method{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Method) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Method) ProtoMessage() {} + +func (x *Method) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Method.ProtoReflect.Descriptor instead. +func (*Method) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{1} +} + +func (x *Method) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Method) GetRequestTypeUrl() string { + if x != nil { + return x.RequestTypeUrl + } + return "" +} + +func (x *Method) GetRequestStreaming() bool { + if x != nil { + return x.RequestStreaming + } + return false +} + +func (x *Method) GetResponseTypeUrl() string { + if x != nil { + return x.ResponseTypeUrl + } + return "" +} + +func (x *Method) GetResponseStreaming() bool { + if x != nil { + return x.ResponseStreaming + } + return false +} + +func (x *Method) GetOptions() []*typepb.Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Method) GetSyntax() typepb.Syntax { + if x != nil { + return x.Syntax + } + return typepb.Syntax(0) +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. 
+// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` +} + +func (x *Mixin) Reset() { + *x = Mixin{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mixin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mixin) ProtoMessage() {} + +func (x *Mixin) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mixin.ProtoReflect.Descriptor instead. 
+func (*Mixin) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{2} +} + +func (x *Mixin) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Mixin) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +var File_google_protobuf_api_proto protoreflect.FileDescriptor + +var file_google_protobuf_api_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x24, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, + 0x02, 0x0a, 0x03, 0x41, 0x70, 0x69, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x31, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x6d, 0x69, 0x78, 0x69, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x69, 0x78, 0x69, 0x6e, 0x52, 0x06, 0x6d, 0x69, 0x78, 0x69, 0x6e, + 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, + 0x61, 0x78, 0x22, 0xb2, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x11, 0x72, + 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0x2f, 0x0a, 0x05, 0x4d, 0x69, 0x78, 0x69, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x08, 0x41, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_api_proto_rawDescOnce sync.Once + file_google_protobuf_api_proto_rawDescData = file_google_protobuf_api_proto_rawDesc +) + +func file_google_protobuf_api_proto_rawDescGZIP() []byte { + file_google_protobuf_api_proto_rawDescOnce.Do(func() { + file_google_protobuf_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_api_proto_rawDescData) + }) + return file_google_protobuf_api_proto_rawDescData +} + +var file_google_protobuf_api_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_protobuf_api_proto_goTypes = []interface{}{ + (*Api)(nil), // 0: google.protobuf.Api + (*Method)(nil), // 1: google.protobuf.Method + (*Mixin)(nil), // 2: google.protobuf.Mixin + (*typepb.Option)(nil), // 3: google.protobuf.Option + (*sourcecontextpb.SourceContext)(nil), // 4: google.protobuf.SourceContext + (typepb.Syntax)(0), // 5: google.protobuf.Syntax +} +var file_google_protobuf_api_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.Api.methods:type_name -> google.protobuf.Method + 3, // 1: 
google.protobuf.Api.options:type_name -> google.protobuf.Option + 4, // 2: google.protobuf.Api.source_context:type_name -> google.protobuf.SourceContext + 2, // 3: google.protobuf.Api.mixins:type_name -> google.protobuf.Mixin + 5, // 4: google.protobuf.Api.syntax:type_name -> google.protobuf.Syntax + 3, // 5: google.protobuf.Method.options:type_name -> google.protobuf.Option + 5, // 6: google.protobuf.Method.syntax:type_name -> google.protobuf.Syntax + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_google_protobuf_api_proto_init() } +func file_google_protobuf_api_proto_init() { + if File_google_protobuf_api_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Api); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Method); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mixin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_api_proto_goTypes, + DependencyIndexes: file_google_protobuf_api_proto_depIdxs, + MessageInfos: file_google_protobuf_api_proto_msgTypes, + }.Build() + File_google_protobuf_api_proto = out.File + file_google_protobuf_api_proto_rawDesc = nil + file_google_protobuf_api_proto_goTypes = nil + file_google_protobuf_api_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 00000000..df709a8d --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,374 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+// Package durationpb contains generated types for google/protobuf/duration.proto.
+//
+// The Duration message represents a signed span of time.
+//
+// # Conversion to a Go Duration
+//
+// The AsDuration method can be used to convert a Duration message to a
+// standard Go time.Duration value:
+//
+//	d := dur.AsDuration()
+//	... // make use of d as a time.Duration
+//
+// Converting to a time.Duration is a common operation so that the extensive
+// set of time-based operations provided by the time package can be leveraged.
+// See https://golang.org/pkg/time for more information.
+//
+// The AsDuration method performs the conversion on a best-effort basis.
+// Durations with denormal values (e.g., nanoseconds beyond -99999999 and
+// +99999999, inclusive; or seconds and nanoseconds with opposite signs)
+// are normalized during the conversion to a time.Duration. To manually check
+// for an invalid Duration per the documented limitations in duration.proto,
+// additionally call the CheckValid method:
+//
+//	if err := dur.CheckValid(); err != nil {
+//		... // handle error
+//	}
+//
+// Note that the documented limitations in duration.proto do not protect a
+// Duration from overflowing the representable range of a time.Duration in Go.
+// The AsDuration method uses saturation arithmetic such that an overflow clamps
+// the resulting value to the closest representable value (e.g., math.MaxInt64
+// for positive overflow and math.MinInt64 for negative overflow).
+//
+// # Conversion from a Go Duration
+//
+// The durationpb.New function can be used to construct a Duration message
+// from a standard Go time.Duration value:
+//
+//	dur := durationpb.New(d)
+//	... // make use of dur as a *durationpb.Duration
+package durationpb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	math "math"
+	reflect "reflect"
+	sync "sync"
+	time "time"
+)
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. 
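+//
+// A small sketch of the round trip through this package (within durationpb):
+//
+//	dur := New(90 * time.Second)
+//	_ = dur.IsValid()    // true: well within the +-10000 year range
+//	_ = dur.AsDuration() // back to 1m30s as a time.Duration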
+func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. +func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []interface{}{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go new file mode 100644 index 00000000..9a7277ba --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -0,0 +1,166 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package emptypb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. 
For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} +} + +var File_google_protobuf_empty_proto protoreflect.FileDescriptor + +var file_google_protobuf_empty_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, + 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_empty_proto_rawDescOnce sync.Once + file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc +) + +func file_google_protobuf_empty_proto_rawDescGZIP() []byte { + file_google_protobuf_empty_proto_rawDescOnce.Do(func() { + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + }) + return file_google_protobuf_empty_proto_rawDescData +} + +var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_empty_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: google.protobuf.Empty +} +var file_google_protobuf_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_empty_proto_init() } +func file_google_protobuf_empty_proto_init() { + if File_google_protobuf_empty_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Empty); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_empty_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_empty_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_empty_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_empty_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_empty_proto = out.File
+	file_google_protobuf_empty_proto_rawDesc = nil
+	file_google_protobuf_empty_proto_goTypes = nil
+	file_google_protobuf_empty_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
new file mode 100644
index 00000000..e8789cb3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -0,0 +1,588 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
+//
+// The FieldMask message represents a set of symbolic field paths.
+// The paths are specific to some target message type,
+// which is not stored within the FieldMask message itself.
+//
+// # Constructing a FieldMask
+//
+// The New function is used to construct a FieldMask:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+//	if err != nil {
+//		...
// handle error +// } +// ... // make use of fm +// +// The "field.name" and "field.number" paths are valid paths according to the +// google.protobuf.DescriptorProto message. Use of a path that does not correlate +// to valid fields reachable from DescriptorProto would result in an error. +// +// Once a FieldMask message has been constructed, +// the Append method can be used to insert additional paths to the path set: +// +// var messageType *descriptorpb.DescriptorProto +// if err := fm.Append(messageType, "options"); err != nil { +// ... // handle error +// } +// +// # Type checking a FieldMask +// +// In order to verify that a FieldMask represents a set of fields that are +// reachable from some target message type, use the IsValid method: +// +// var messageType *descriptorpb.DescriptorProto +// if fm.IsValid(messageType) { +// ... // make use of fm +// } +// +// IsValid needs to be passed the target message type as an input since the +// FieldMask message itself does not store the message type that the set of paths +// are for. +package fieldmaskpb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sort "sort" + strings "strings" + sync "sync" +) + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. 
If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the new value will be merged into the existing sub-message
+// in the target resource.
+//
+// For example, given the target message:
+//
+//	f {
+//	  b {
+//	    d: 1
+//	    x: 2
+//	  }
+//	  c: [1]
+//	}
+//
+// And an update message:
+//
+//	f {
+//	  b {
+//	    d: 10
+//	  }
+//	  c: [2]
+//	}
+//
+// then if the field mask is:
+//
+//	paths: ["f.b", "f.c"]
+//
+// then the result will be:
+//
+//	f {
+//	  b {
+//	    d: 10
+//	    x: 2
+//	  }
+//	  c: [1, 2]
+//	}
+//
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//	message Profile {
+//	  User user = 1;
+//	  Photo photo = 2;
+//	}
+//	message User {
+//	  string display_name = 1;
+//	  string address = 2;
+//	}
+//
+// In proto a field mask for `Profile` may look as such:
+//
+//	mask {
+//	  paths: "user.display_name"
+//	  paths: "photo"
+//	}
+//
+// In JSON, the same mask is represented as below:
+//
+//	{
+//	  mask: "user.displayName,photo"
+//	}
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//	message SampleMessage {
+//	  oneof test_oneof {
+//	    string name = 4;
+//	    SubMessage sub_message = 9;
+//	  }
+//	}
+//
+// The field mask can be:
+//
+//	mask {
+//	  paths: "name"
+//	}
+//
+// Or:
+//
+//	mask {
+//	  paths: "sub_message"
+//	}
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
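+//
+// As a small illustrative sketch (the mask values here are assumed, not
+// normative), masks can be combined with this package's Union and
+// Intersect helpers, which normalize their result:
+//
+//	ma := &fieldmaskpb.FieldMask{Paths: []string{"f.b", "f.c"}}
+//	mb := &fieldmaskpb.FieldMask{Paths: []string{"f.b.d"}}
+//	_ = fieldmaskpb.Union(ma, mb)     // paths: ["f.b", "f.c"] ("f.b.d" is covered by "f.b")
+//	_ = fieldmaskpb.Intersect(ma, mb) // paths: ["f.b.d"]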
+// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +type FieldMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` +} + +// New constructs a field mask from a list of paths and verifies that +// each one is valid according to the specified message type. +func New(m proto.Message, paths ...string) (*FieldMask, error) { + x := new(FieldMask) + return x, x.Append(m, paths...) +} + +// Union returns the union of all the paths in the input field masks. +func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var out []string + out = append(out, mx.GetPaths()...) + out = append(out, my.GetPaths()...) + for _, m := range ms { + out = append(out, m.GetPaths()...) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// Intersect returns the intersection of all the paths in the input field masks. +func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var ss1, ss2 []string // reused buffers for performance + intersect := func(out, in []string) []string { + ss1 = normalizePaths(append(ss1[:0], in...)) + ss2 = normalizePaths(append(ss2[:0], out...)) + out = out[:0] + for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); { + switch s1, s2 := ss1[i1], ss2[i2]; { + case hasPathPrefix(s1, s2): + out = append(out, s1) + i1++ + case hasPathPrefix(s2, s1): + out = append(out, s2) + i2++ + case lessPath(s1, s2): + i1++ + case lessPath(s2, s1): + i2++ + } + } + return out + } + + out := Union(mx, my, ms...).GetPaths() + out = intersect(out, mx.GetPaths()) + out = intersect(out, my.GetPaths()) + for _, m := range ms { + out = intersect(out, m.GetPaths()) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// IsValid reports whether all the paths are syntactically valid and +// refer to known fields in the specified message type. +// It reports false for a nil FieldMask. +func (x *FieldMask) IsValid(m proto.Message) bool { + paths := x.GetPaths() + return x != nil && numValidPaths(m, paths) == len(paths) +} + +// Append appends a list of paths to the mask and verifies that each one +// is valid according to the specified message type. +// An invalid path is not appended and breaks insertion of subsequent paths. +func (x *FieldMask) Append(m proto.Message, paths ...string) error { + numValid := numValidPaths(m, paths) + x.Paths = append(x.Paths, paths[:numValid]...) + paths = paths[numValid:] + if len(paths) > 0 { + name := m.ProtoReflect().Descriptor().FullName() + return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name) + } + return nil +} + +func numValidPaths(m proto.Message, paths []string) int { + md0 := m.ProtoReflect().Descriptor() + for i, path := range paths { + md := md0 + if !rangeFields(path, func(field string) bool { + // Search the field within the message. + if md == nil { + return false // not within a message + } + fd := md.Fields().ByName(protoreflect.Name(field)) + // The real field name of a group is the message name. 
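+			// A proto2 group is referenced in a path by its message name
+			// (e.g. "MyGroup"), while the descriptor stores the lowercased
+			// field name ("mygroup"), so retry the lookup with the lowercased
+			// component and accept it only for a group whose message name
+			// matches the path component exactly.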
+			if fd == nil {
+				gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
+				if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
+					fd = gd
+				}
+			} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
+				fd = nil
+			}
+			if fd == nil {
+				return false // message does not have this field
+			}
+
+			// Identify the next message to search within.
+			md = fd.Message() // may be nil
+
+			// Repeated fields are only allowed at the last position.
+			if fd.IsList() || fd.IsMap() {
+				md = nil
+			}
+
+			return true
+		}) {
+			return i
+		}
+	}
+	return len(paths)
+}
+
+// Normalize converts the mask to its canonical form where all paths are sorted
+// and redundant paths are removed.
+func (x *FieldMask) Normalize() {
+	x.Paths = normalizePaths(x.Paths)
+}
+
+func normalizePaths(paths []string) []string {
+	sort.Slice(paths, func(i, j int) bool {
+		return lessPath(paths[i], paths[j])
+	})
+
+	// Elide any path that is a prefix match on the previous.
+	out := paths[:0]
+	for _, path := range paths {
+		if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
+			continue
+		}
+		out = append(out, path)
+	}
+	return out
+}
+
+// hasPathPrefix is like strings.HasPrefix, but further checks for either
+// an exact match or that the prefix is delimited by a dot.
+func hasPathPrefix(path, prefix string) bool {
+	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
+}
+
+// lessPath is a lexicographical comparison where dot is specially treated
+// as the smallest symbol.
+func lessPath(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
+			return (x[i] - '.') < (y[i] - '.')
+		}
+	}
+	return len(x) < len(y)
+}
+
+// rangeFields is like strings.Split(path, "."), but avoids allocations by
+// iterating over each field in place and calling an iterator function.
+func rangeFields(path string, f func(field string) bool) bool {
+	for {
+		var field string
+		if i := strings.IndexByte(path, '.'); i >= 0 {
+			field, path = path[:i], path[i:]
+		} else {
+			field, path = path, ""
+		}
+
+		if !f(field) {
+			return false
+		}
+
+		if len(path) == 0 {
+			return true
+		}
+		path = strings.TrimPrefix(path, ".")
+	}
+}
+
+func (x *FieldMask) Reset() {
+	*x = FieldMask{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FieldMask) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMask) ProtoMessage() {}
+
+func (x *FieldMask) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
+func (*FieldMask) Descriptor() ([]byte, []int) { + return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldMask) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor + +var file_google_protobuf_field_mask_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_field_mask_proto_rawDescOnce sync.Once + file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc +) + +func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { + file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + }) + return file_google_protobuf_field_mask_proto_rawDescData +} + +var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ + (*FieldMask)(nil), // 0: google.protobuf.FieldMask +} +var file_google_protobuf_field_mask_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_field_mask_proto_init() } +func file_google_protobuf_field_mask_proto_init() { + if File_google_protobuf_field_mask_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_google_protobuf_field_mask_proto_goTypes, + DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, + MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, + }.Build() + File_google_protobuf_field_mask_proto = out.File + file_google_protobuf_field_mask_proto_rawDesc = nil + file_google_protobuf_field_mask_proto_goTypes = nil + file_google_protobuf_field_mask_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go new file mode 100644 index 00000000..0980d5ae --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go @@ -0,0 +1,176 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package sourcecontextpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` +} + +func (x *SourceContext) Reset() { + *x = SourceContext{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_source_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceContext) ProtoMessage() {} + +func (x *SourceContext) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_source_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceContext.ProtoReflect.Descriptor instead. +func (*SourceContext) Descriptor() ([]byte, []int) { + return file_google_protobuf_source_context_proto_rawDescGZIP(), []int{0} +} + +func (x *SourceContext) GetFileName() string { + if x != nil { + return x.FileName + } + return "" +} + +var File_google_protobuf_source_context_proto protoreflect.FileDescriptor + +var file_google_protobuf_source_context_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x2c, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x8a, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x12, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_source_context_proto_rawDescOnce sync.Once + file_google_protobuf_source_context_proto_rawDescData = file_google_protobuf_source_context_proto_rawDesc +) + +func file_google_protobuf_source_context_proto_rawDescGZIP() []byte { + file_google_protobuf_source_context_proto_rawDescOnce.Do(func() { + file_google_protobuf_source_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_source_context_proto_rawDescData) + }) + return file_google_protobuf_source_context_proto_rawDescData +} + +var file_google_protobuf_source_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_source_context_proto_goTypes = []interface{}{ + (*SourceContext)(nil), // 0: 
google.protobuf.SourceContext +} +var file_google_protobuf_source_context_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_source_context_proto_init() } +func file_google_protobuf_source_context_proto_init() { + if File_google_protobuf_source_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_source_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_source_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_source_context_proto_goTypes, + DependencyIndexes: file_google_protobuf_source_context_proto_depIdxs, + MessageInfos: file_google_protobuf_source_context_proto_msgTypes, + }.Build() + File_google_protobuf_source_context_proto = out.File + file_google_protobuf_source_context_proto_rawDesc = nil + file_google_protobuf_source_context_proto_goTypes = nil + file_google_protobuf_source_context_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go new file mode 100644 index 00000000..d2bac8b8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -0,0 +1,810 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +// Package structpb contains generated types for google/protobuf/struct.proto. +// +// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are +// used to represent arbitrary JSON. The Value message represents a JSON value, +// the Struct message represents a JSON object, and the ListValue message +// represents a JSON array. See https://json.org for more information. +// +// The Value, Struct, and ListValue types have generated MarshalJSON and +// UnmarshalJSON methods such that they serialize JSON equivalent to what the +// messages themselves represent. Use of these types with the +// "google.golang.org/protobuf/encoding/protojson" package +// ensures that they will be serialized as their JSON equivalent. +// +// # Conversion to and from a Go interface +// +// The standard Go "encoding/json" package has functionality to serialize +// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and +// ListValue.AsSlice methods can convert the protobuf message representation into +// a form represented by interface{}, map[string]interface{}, and []interface{}. +// This form can be used with other packages that operate on such data structures +// and also directly with the standard json package. +// +// In order to convert the interface{}, map[string]interface{}, and []interface{} +// forms back as Value, Struct, and ListValue messages, use the NewStruct, +// NewList, and NewValue constructor functions. +// +// # Example usage +// +// Consider the following example JSON object: +// +// { +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": { +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100" +// }, +// "phoneNumbers": [ +// { +// "type": "home", +// "number": "212 555-1234" +// }, +// { +// "type": "office", +// "number": "646 555-4567" +// } +// ], +// "children": [], +// "spouse": null +// } +// +// To construct a Value message representing the above JSON object: +// +// m, err := structpb.NewValue(map[string]interface{}{ +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": map[string]interface{}{ +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100", +// }, +// "phoneNumbers": []interface{}{ +// map[string]interface{}{ +// "type": "home", +// "number": "212 555-1234", +// }, +// map[string]interface{}{ +// "type": "office", +// "number": "646 555-4567", +// }, +// }, +// "children": []interface{}{}, +// "spouse": nil, +// }) +// if err != nil { +// ... // handle error +// } +// ... 
// make use of m as a *structpb.Value +package structpb + +import ( + base64 "encoding/base64" + protojson "google.golang.org/protobuf/encoding/protojson" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + utf8 "unicode/utf8" +) + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +// Enum value maps for NullValue. +var ( + NullValue_name = map[int32]string{ + 0: "NULL_VALUE", + } + NullValue_value = map[string]int32{ + "NULL_VALUE": 0, + } +) + +func (x NullValue) Enum() *NullValue { + p := new(NullValue) + *p = x + return p +} + +func (x NullValue) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NullValue) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_struct_proto_enumTypes[0].Descriptor() +} + +func (NullValue) Type() protoreflect.EnumType { + return &file_google_protobuf_struct_proto_enumTypes[0] +} + +func (x NullValue) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NullValue.Descriptor instead. +func (NullValue) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// NewStruct constructs a Struct from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. +func NewStruct(v map[string]interface{}) (*Struct, error) { + x := &Struct{Fields: make(map[string]*Value, len(v))} + for k, v := range v { + if !utf8.ValidString(k) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) + } + var err error + x.Fields[k], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsMap converts x to a general-purpose Go map. +// The map values are converted by calling Value.AsInterface. 
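+//
+// A minimal sketch (the input map here is assumed for illustration):
+//
+//	s, _ := structpb.NewStruct(map[string]interface{}{"n": 1})
+//	m := s.AsMap() // map[string]interface{}{"n": float64(1)}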
+func (x *Struct) AsMap() map[string]interface{} { + f := x.GetFields() + vs := make(map[string]interface{}, len(f)) + for k, v := range f { + vs[k] = v.AsInterface() + } + return vs +} + +func (x *Struct) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Struct) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Struct) Reset() { + *x = Struct{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Struct) ProtoMessage() {} + +func (x *Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Struct.ProtoReflect.Descriptor instead. +func (*Struct) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *Struct) GetFields() map[string]*Value { + if x != nil { + return x.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of these +// variants. Absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of value. + // + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +// NewValue constructs a Value from a general-purpose Go interface. +// +// ╔════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]interface{} │ stored as StructValue ║ +// ║ []interface{} │ stored as ListValue ║ +// ╚════════════════════════╧════════════════════════════════════════════╝ +// +// When converting an int64 or uint64 to a NumberValue, numeric precision loss +// is possible since they are stored as a float64. 
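+//
+// For example (a sketch; the inputs are assumed for illustration):
+//
+//	v, _ := structpb.NewValue([]byte{0x01, 0x02}) // StringValue "AQI=" (base64)
+//	w, _ := structpb.NewValue(int64(1) << 60)     // NumberValue float64(1 << 60)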
+func NewValue(v interface{}) (*Value, error) { + switch v := v.(type) { + case nil: + return NewNullValue(), nil + case bool: + return NewBoolValue(v), nil + case int: + return NewNumberValue(float64(v)), nil + case int32: + return NewNumberValue(float64(v)), nil + case int64: + return NewNumberValue(float64(v)), nil + case uint: + return NewNumberValue(float64(v)), nil + case uint32: + return NewNumberValue(float64(v)), nil + case uint64: + return NewNumberValue(float64(v)), nil + case float32: + return NewNumberValue(float64(v)), nil + case float64: + return NewNumberValue(float64(v)), nil + case string: + if !utf8.ValidString(v) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) + } + return NewStringValue(v), nil + case []byte: + s := base64.StdEncoding.EncodeToString(v) + return NewStringValue(s), nil + case map[string]interface{}: + v2, err := NewStruct(v) + if err != nil { + return nil, err + } + return NewStructValue(v2), nil + case []interface{}: + v2, err := NewList(v) + if err != nil { + return nil, err + } + return NewListValue(v2), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewNullValue constructs a new null Value. +func NewNullValue() *Value { + return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *Value { + return &Value{Kind: &Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *Value { + return &Value{Kind: &Value_NumberValue{NumberValue: v}} +} + +// NewStringValue constructs a new string Value. +func NewStringValue(v string) *Value { + return &Value{Kind: &Value_StringValue{StringValue: v}} +} + +// NewStructValue constructs a new struct Value. +func NewStructValue(v *Struct) *Value { + return &Value{Kind: &Value_StructValue{StructValue: v}} +} + +// NewListValue constructs a new list Value. +func NewListValue(v *ListValue) *Value { + return &Value{Kind: &Value_ListValue{ListValue: v}} +} + +// AsInterface converts x to a general-purpose Go interface. +// +// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce +// semantically equivalent JSON (assuming no errors occur). +// +// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are +// converted as strings to remain compatible with MarshalJSON. 
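+//
+// A short sketch of the special float handling (the value is assumed):
+//
+//	v := structpb.NewNumberValue(math.Inf(+1))
+//	_ = v.AsInterface() // "Infinity" (a string, for JSON compatibility)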
+func (x *Value) AsInterface() interface{} { + switch v := x.GetKind().(type) { + case *Value_NumberValue: + if v != nil { + switch { + case math.IsNaN(v.NumberValue): + return "NaN" + case math.IsInf(v.NumberValue, +1): + return "Infinity" + case math.IsInf(v.NumberValue, -1): + return "-Infinity" + default: + return v.NumberValue + } + } + case *Value_StringValue: + if v != nil { + return v.StringValue + } + case *Value_BoolValue: + if v != nil { + return v.BoolValue + } + case *Value_StructValue: + if v != nil { + return v.StructValue.AsMap() + } + case *Value_ListValue: + if v != nil { + return v.ListValue.AsSlice() + } + } + return nil +} + +func (x *Value) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Value) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. +func (*Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (x *Value) GetNumberValue() float64 { + if x, ok := x.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetStructValue() *Struct { + if x, ok := x.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + // Represents a null value. + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + // Represents a double value. + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + // Represents a string value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + // Represents a boolean value. + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + // Represents a structured value. 
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + // Represents a repeated `Value`. + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +// NewList constructs a ListValue from a general-purpose Go slice. +// The slice elements are converted using NewValue. +func NewList(v []interface{}) (*ListValue, error) { + x := &ListValue{Values: make([]*Value, len(v))} + for i, v := range v { + var err error + x.Values[i], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsSlice converts x to a general-purpose Go slice. +// The slice elements are converted by calling Value.AsInterface. +func (x *ListValue) AsSlice() []interface{} { + vals := x.GetValues() + vs := make([]interface{}, len(vals)) + for i, v := range vals { + vs[i] = v.AsInterface() + } + return vs +} + +func (x *ListValue) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *ListValue) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +var File_google_protobuf_struct_proto protoreflect.FileDescriptor + +var file_google_protobuf_struct_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, + 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, + 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, + 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x12, 0x2e, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, + 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, + 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, + 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_protobuf_struct_proto_rawDescOnce sync.Once + file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc +) + +func file_google_protobuf_struct_proto_rawDescGZIP() []byte { + file_google_protobuf_struct_proto_rawDescOnce.Do(func() { + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + }) + return file_google_protobuf_struct_proto_rawDescData +} + +var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_struct_proto_goTypes = []interface{}{ + (NullValue)(0), // 0: google.protobuf.NullValue + (*Struct)(nil), // 1: google.protobuf.Struct + (*Value)(nil), // 2: google.protobuf.Value + (*ListValue)(nil), // 3: google.protobuf.ListValue + nil, // 4: google.protobuf.Struct.FieldsEntry +} +var file_google_protobuf_struct_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry + 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct + 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue + 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value + 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_protobuf_struct_proto_init() } +func file_google_protobuf_struct_proto_init() { + if File_google_protobuf_struct_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Struct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_struct_proto_goTypes, + DependencyIndexes: file_google_protobuf_struct_proto_depIdxs, + EnumInfos: file_google_protobuf_struct_proto_enumTypes, + MessageInfos: file_google_protobuf_struct_proto_msgTypes, + }.Build() + File_google_protobuf_struct_proto = out.File + file_google_protobuf_struct_proto_rawDesc = nil + file_google_protobuf_struct_proto_goTypes = nil + file_google_protobuf_struct_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go new file mode 100644 index 00000000..4cb8d0a5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go @@ -0,0 +1,990 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/protobuf/type.proto + +package typepb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + sourcecontextpb "google.golang.org/protobuf/types/known/sourcecontextpb" + reflect "reflect" + sync "sync" +) + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 + // Syntax `editions`. + Syntax_SYNTAX_EDITIONS Syntax = 2 +) + +// Enum value maps for Syntax. +var ( + Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", + 2: "SYNTAX_EDITIONS", + } + Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, + "SYNTAX_EDITIONS": 2, + } +) + +func (x Syntax) Enum() *Syntax { + p := new(Syntax) + *p = x + return p +} + +func (x Syntax) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Syntax) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[0].Descriptor() +} + +func (Syntax) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[0] +} + +func (x Syntax) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Syntax.Descriptor instead. +func (Syntax) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +// Enum value maps for Field_Kind. 
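+// (A sketch of how the two maps relate: Field_Kind_name[9] yields
+// "TYPE_STRING", and Field_Kind_value["TYPE_STRING"] yields 9; each map
+// inverts the other.)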
+var ( + Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x Field_Kind) Enum() *Field_Kind { + p := new(Field_Kind) + *p = x + return p +} + +func (x Field_Kind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Field_Kind) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[1].Descriptor() +} + +func (Field_Kind) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[1] +} + +func (x Field_Kind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Field_Kind.Descriptor instead. +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +// Enum value maps for Field_Cardinality. +var ( + Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", + } + Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, + } +) + +func (x Field_Cardinality) Enum() *Field_Cardinality { + p := new(Field_Cardinality) + *p = x + return p +} + +func (x Field_Cardinality) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Field_Cardinality) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[2].Descriptor() +} + +func (Field_Cardinality) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[2] +} + +func (x Field_Cardinality) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Field_Cardinality.Descriptor instead. +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. 
+ Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + // The source edition string, only valid when syntax is SYNTAX_EDITIONS. + Edition string `protobuf:"bytes,7,opt,name=edition,proto3" json:"edition,omitempty"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{0} +} + +func (x *Type) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type) GetFields() []*Field { + if x != nil { + return x.Fields + } + return nil +} + +func (x *Type) GetOneofs() []string { + if x != nil { + return x.Oneofs + } + return nil +} + +func (x *Type) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Type) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Type) GetSyntax() Syntax { + if x != nil { + return x.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (x *Type) GetEdition() string { + if x != nil { + return x.Edition + } + return "" +} + +// A single field of a message type. +type Field struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. 
+ OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *Field) Reset() { + *x = Field{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Field) ProtoMessage() {} + +func (x *Field) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Field.ProtoReflect.Descriptor instead. +func (*Field) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1} +} + +func (x *Field) GetKind() Field_Kind { + if x != nil { + return x.Kind + } + return Field_TYPE_UNKNOWN +} + +func (x *Field) GetCardinality() Field_Cardinality { + if x != nil { + return x.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (x *Field) GetNumber() int32 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *Field) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Field) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Field) GetOneofIndex() int32 { + if x != nil { + return x.OneofIndex + } + return 0 +} + +func (x *Field) GetPacked() bool { + if x != nil { + return x.Packed + } + return false +} + +func (x *Field) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Field) GetJsonName() string { + if x != nil { + return x.JsonName + } + return "" +} + +func (x *Field) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// Enum type definition. +type Enum struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + // The source edition string, only valid when syntax is SYNTAX_EDITIONS. 
+ Edition string `protobuf:"bytes,6,opt,name=edition,proto3" json:"edition,omitempty"` +} + +func (x *Enum) Reset() { + *x = Enum{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Enum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Enum) ProtoMessage() {} + +func (x *Enum) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Enum.ProtoReflect.Descriptor instead. +func (*Enum) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{2} +} + +func (x *Enum) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Enum) GetEnumvalue() []*EnumValue { + if x != nil { + return x.Enumvalue + } + return nil +} + +func (x *Enum) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Enum) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Enum) GetSyntax() Syntax { + if x != nil { + return x.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (x *Enum) GetEdition() string { + if x != nil { + return x.Edition + } + return "" +} + +// Enum value definition. +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{3} +} + +func (x *EnumValue) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EnumValue) GetNumber() int32 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *EnumValue) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. 
+ // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Value *anypb.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Option) Reset() { + *x = Option{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Option) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Option) ProtoMessage() {} + +func (x *Option) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Option.ProtoReflect.Descriptor instead. +func (*Option) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{4} +} + +func (x *Option) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Option) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_type_proto protoreflect.FileDescriptor + +var file_google_protobuf_type_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb4, 0x06, 0x0a, 0x05, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x2f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x12, 0x44, 0x0a, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x2e, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x63, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc8, + 0x02, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x59, 0x50, 
0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, + 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, + 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, + 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, + 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, + 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x74, 0x0a, 0x0b, 0x43, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x41, 0x52, 0x44, + 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x43, + 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, + 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, + 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, + 0x99, 0x02, 0x0a, 0x04, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6a, 0x0a, 0x09, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x06, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2a, 0x43, 0x0a, 0x06, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x11, 0x0a, 0x0d, 0x53, + 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x53, 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x02, 0x42, 0x7b, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x09, 0x54, + 0x79, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_type_proto_rawDescOnce sync.Once + file_google_protobuf_type_proto_rawDescData = file_google_protobuf_type_proto_rawDesc +) + +func file_google_protobuf_type_proto_rawDescGZIP() []byte { + file_google_protobuf_type_proto_rawDescOnce.Do(func() { + file_google_protobuf_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_type_proto_rawDescData) + }) + return file_google_protobuf_type_proto_rawDescData +} + +var file_google_protobuf_type_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_protobuf_type_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_protobuf_type_proto_goTypes = []interface{}{ + (Syntax)(0), // 0: google.protobuf.Syntax + (Field_Kind)(0), // 1: google.protobuf.Field.Kind + (Field_Cardinality)(0), // 2: google.protobuf.Field.Cardinality + (*Type)(nil), // 3: 
google.protobuf.Type + (*Field)(nil), // 4: google.protobuf.Field + (*Enum)(nil), // 5: google.protobuf.Enum + (*EnumValue)(nil), // 6: google.protobuf.EnumValue + (*Option)(nil), // 7: google.protobuf.Option + (*sourcecontextpb.SourceContext)(nil), // 8: google.protobuf.SourceContext + (*anypb.Any)(nil), // 9: google.protobuf.Any +} +var file_google_protobuf_type_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Type.fields:type_name -> google.protobuf.Field + 7, // 1: google.protobuf.Type.options:type_name -> google.protobuf.Option + 8, // 2: google.protobuf.Type.source_context:type_name -> google.protobuf.SourceContext + 0, // 3: google.protobuf.Type.syntax:type_name -> google.protobuf.Syntax + 1, // 4: google.protobuf.Field.kind:type_name -> google.protobuf.Field.Kind + 2, // 5: google.protobuf.Field.cardinality:type_name -> google.protobuf.Field.Cardinality + 7, // 6: google.protobuf.Field.options:type_name -> google.protobuf.Option + 6, // 7: google.protobuf.Enum.enumvalue:type_name -> google.protobuf.EnumValue + 7, // 8: google.protobuf.Enum.options:type_name -> google.protobuf.Option + 8, // 9: google.protobuf.Enum.source_context:type_name -> google.protobuf.SourceContext + 0, // 10: google.protobuf.Enum.syntax:type_name -> google.protobuf.Syntax + 7, // 11: google.protobuf.EnumValue.options:type_name -> google.protobuf.Option + 9, // 12: google.protobuf.Option.value:type_name -> google.protobuf.Any + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_google_protobuf_type_proto_init() } +func file_google_protobuf_type_proto_init() { + if File_google_protobuf_type_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_type_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Field); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Enum); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Option); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_type_proto_rawDesc, + NumEnums: 3, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_type_proto_goTypes, + DependencyIndexes: 
file_google_protobuf_type_proto_depIdxs, + EnumInfos: file_google_protobuf_type_proto_enumTypes, + MessageInfos: file_google_protobuf_type_proto_msgTypes, + }.Build() + File_google_protobuf_type_proto = out.File + file_google_protobuf_type_proto_rawDesc = nil + file_google_protobuf_type_proto_goTypes = nil + file_google_protobuf_type_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go new file mode 100644 index 00000000..762a8713 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -0,0 +1,760 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Double stores v in a new DoubleValue and returns a pointer to it. +func Double(v float64) *DoubleValue { + return &DoubleValue{Value: v} +} + +func (x *DoubleValue) Reset() { + *x = DoubleValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleValue) ProtoMessage() {} + +func (x *DoubleValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead. +func (*DoubleValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0} +} + +func (x *DoubleValue) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Float stores v in a new FloatValue and returns a pointer to it. +func Float(v float32) *FloatValue { + return &FloatValue{Value: v} +} + +func (x *FloatValue) Reset() { + *x = FloatValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FloatValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatValue) ProtoMessage() {} + +func (x *FloatValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead. +func (*FloatValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatValue) GetValue() float32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int64 stores v in a new Int64Value and returns a pointer to it. 
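+//
+// As a sketch of the presence semantics noted in the package comment, a
+// *Int64Value field distinguishes an absent value from an explicit zero,
+// which a plain int64 cannot:
+//
+//	var n *Int64Value // nil: no value set
+//	n = Int64(0)      // non-nil: explicitly set to 0
+//	_ = n.GetValue()  // generated getters are nil-safe and return the zero value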
+func Int64(v int64) *Int64Value { + return &Int64Value{Value: v} +} + +func (x *Int64Value) Reset() { + *x = Int64Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Value) ProtoMessage() {} + +func (x *Int64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead. +func (*Int64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2} +} + +func (x *Int64Value) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt64 stores v in a new UInt64Value and returns a pointer to it. +func UInt64(v uint64) *UInt64Value { + return &UInt64Value{Value: v} +} + +func (x *UInt64Value) Reset() { + *x = UInt64Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Value) ProtoMessage() {} + +func (x *UInt64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead. +func (*UInt64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3} +} + +func (x *UInt64Value) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int32 stores v in a new Int32Value and returns a pointer to it. 
+func Int32(v int32) *Int32Value { + return &Int32Value{Value: v} +} + +func (x *Int32Value) Reset() { + *x = Int32Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Value) ProtoMessage() {} + +func (x *Int32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead. +func (*Int32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4} +} + +func (x *Int32Value) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt32 stores v in a new UInt32Value and returns a pointer to it. +func UInt32(v uint32) *UInt32Value { + return &UInt32Value{Value: v} +} + +func (x *UInt32Value) Reset() { + *x = UInt32Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Value) ProtoMessage() {} + +func (x *UInt32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead. +func (*UInt32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Value) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bool stores v in a new BoolValue and returns a pointer to it. 
+func Bool(v bool) *BoolValue { + return &BoolValue{Value: v} +} + +func (x *BoolValue) Reset() { + *x = BoolValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BoolValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolValue) ProtoMessage() {} + +func (x *BoolValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead. +func (*BoolValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6} +} + +func (x *BoolValue) GetValue() bool { + if x != nil { + return x.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// String stores v in a new StringValue and returns a pointer to it. +func String(v string) *StringValue { + return &StringValue{Value: v} +} + +func (x *StringValue) Reset() { + *x = StringValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringValue) ProtoMessage() {} + +func (x *StringValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringValue.ProtoReflect.Descriptor instead. +func (*StringValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7} +} + +func (x *StringValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bytes stores v in a new BytesValue and returns a pointer to it. 
+func Bytes(v []byte) *BytesValue { + return &BytesValue{Value: v} +} + +func (x *BytesValue) Reset() { + *x = BytesValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BytesValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesValue) ProtoMessage() {} + +func (x *BytesValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead. +func (*BytesValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8} +} + +func (x *BytesValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor + +var file_google_protobuf_wrappers_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, + 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_wrappers_proto_rawDescOnce sync.Once + file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc +) + +func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { + file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + }) + return file_google_protobuf_wrappers_proto_rawDescData +} + +var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ + (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue + (*FloatValue)(nil), // 1: google.protobuf.FloatValue + (*Int64Value)(nil), // 2: google.protobuf.Int64Value + (*UInt64Value)(nil), // 3: google.protobuf.UInt64Value + (*Int32Value)(nil), // 4: google.protobuf.Int32Value + (*UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*BoolValue)(nil), // 6: google.protobuf.BoolValue + (*StringValue)(nil), // 7: google.protobuf.StringValue + (*BytesValue)(nil), // 8: google.protobuf.BytesValue +} +var file_google_protobuf_wrappers_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_wrappers_proto_init() } +func file_google_protobuf_wrappers_proto_init() { + if File_google_protobuf_wrappers_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FloatValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UInt64Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UInt32Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BoolValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BytesValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_wrappers_proto_goTypes, + DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs, + MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, + }.Build() + File_google_protobuf_wrappers_proto = out.File + file_google_protobuf_wrappers_proto_rawDesc = nil + file_google_protobuf_wrappers_proto_goTypes = nil + file_google_protobuf_wrappers_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go new file mode 100644 index 00000000..d0bb96a9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -0,0 +1,656 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +package pluginpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +// Sync with code_generator.h. +type CodeGeneratorResponse_Feature int32 + +const ( + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 +) + +// Enum value maps for CodeGeneratorResponse_Feature. +var ( + CodeGeneratorResponse_Feature_name = map[int32]string{ + 0: "FEATURE_NONE", + 1: "FEATURE_PROTO3_OPTIONAL", + } + CodeGeneratorResponse_Feature_value = map[string]int32{ + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, + } +) + +func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature { + p := new(CodeGeneratorResponse_Feature) + *p = x + return p +} + +func (x CodeGeneratorResponse_Feature) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor() +} + +func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType { + return &file_google_protobuf_compiler_plugin_proto_enumTypes[0] +} + +func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = CodeGeneratorResponse_Feature(num) + return nil +} + +// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead. +func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +// The version number of protocol compiler. 
+type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil && x.Major != nil { + return *x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil && x.Minor != nil { + return *x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil && x.Patch != nil { + return *x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. 
+ ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (x *CodeGeneratorRequest) Reset() { + *x = CodeGeneratorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead. +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1} +} + +func (x *CodeGeneratorRequest) GetFileToGenerate() []string { + if x != nil { + return x.FileToGenerate + } + return nil +} + +func (x *CodeGeneratorRequest) GetParameter() string { + if x != nil && x.Parameter != nil { + return *x.Parameter + } + return "" +} + +func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.ProtoFile + } + return nil +} + +func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. 
+ SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` +} + +func (x *CodeGeneratorResponse) Reset() { + *x = CodeGeneratorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2} +} + +func (x *CodeGeneratorResponse) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { + if x != nil && x.SupportedFeatures != nil { + return *x.SupportedFeatures + } + return 0 +} + +func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if x != nil { + return x.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // + // @@protoc_insertion_point(NAME) + // + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). 
+ // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // + // // @@protoc_insertion_point(namespace_scope) + // + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. + GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"` +} + +func (x *CodeGeneratorResponse_File) Reset() { + *x = CodeGeneratorResponse_File{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead. 
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CodeGeneratorResponse_File) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetInsertionPoint() string { + if x != nil && x.InsertionPoint != nil { + return *x.InsertionPoint + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetContent() string { + if x != nil && x.Content != nil { + return *x.Content + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo { + if x != nil { + return x.GeneratedCodeInfo + } + return nil +} + +var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor + +var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 
0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, + 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, + 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x42, 0x72, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, +} + +var ( + file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once + file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc +) + +func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { + 
file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() { + file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData) + }) + return file_google_protobuf_compiler_plugin_proto_rawDescData +} + +var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ + (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature + (*Version)(nil), // 1: google.protobuf.compiler.Version + (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest + (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse + (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File + (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto + (*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo +} +var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ + 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto + 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_protobuf_compiler_plugin_proto_init() } +func file_google_protobuf_compiler_plugin_proto_init() { + if File_google_protobuf_compiler_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + 
NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes, + DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs, + EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes, + MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes, + }.Build() + File_google_protobuf_compiler_plugin_proto = out.File + file_google_protobuf_compiler_plugin_proto_rawDesc = nil + file_google_protobuf_compiler_plugin_proto_goTypes = nil + file_google_protobuf_compiler_plugin_proto_depIdxs = nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 15c651fd..7c8a056b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -73,6 +73,18 @@ github.com/beorn7/perks/quantile ## explicit; go 1.21 github.com/brianvoe/gofakeit/v6 github.com/brianvoe/gofakeit/v6/data +# github.com/bufbuild/protocompile v0.4.0 +## explicit; go 1.18 +github.com/bufbuild/protocompile +github.com/bufbuild/protocompile/ast +github.com/bufbuild/protocompile/internal +github.com/bufbuild/protocompile/linker +github.com/bufbuild/protocompile/options +github.com/bufbuild/protocompile/parser +github.com/bufbuild/protocompile/protoutil +github.com/bufbuild/protocompile/reporter +github.com/bufbuild/protocompile/sourceinfo +github.com/bufbuild/protocompile/walk # github.com/bytedance/sonic v1.8.0 ## explicit; go 1.15 github.com/bytedance/sonic @@ -170,7 +182,12 @@ github.com/gogo/protobuf/types github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/timestamp # github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 ## explicit; go 1.19 github.com/google/pprof/profile @@ -200,6 +217,20 @@ github.com/influxdata/line-protocol/v2/lineprotocol # github.com/itchyny/timefmt-go v0.1.5 ## explicit; go 1.17 github.com/itchyny/timefmt-go +# github.com/jhump/protoreflect v1.15.1 +## explicit; go 1.18 +github.com/jhump/protoreflect/codec +github.com/jhump/protoreflect/desc +github.com/jhump/protoreflect/desc/internal +github.com/jhump/protoreflect/desc/protoparse +github.com/jhump/protoreflect/desc/protoparse/ast +github.com/jhump/protoreflect/desc/sourceinfo +github.com/jhump/protoreflect/dynamic +github.com/jhump/protoreflect/dynamic/grpcdynamic +github.com/jhump/protoreflect/grpcreflect +github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1 +github.com/jhump/protoreflect/internal +github.com/jhump/protoreflect/internal/codec # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go @@ -278,8 +309,8 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.46.0 -## explicit; go 1.19 +# github.com/prometheus/prometheus v0.39.1 +## explicit; go 1.18 github.com/prometheus/prometheus/util/strutil # github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec ## explicit; go 1.12 @@ -373,8 +404,13 @@ golang.org/x/net/icmp golang.org/x/net/idna golang.org/x/net/internal/iana golang.org/x/net/internal/socket +golang.org/x/net/internal/timeseries golang.org/x/net/ipv4 golang.org/x/net/ipv6 +golang.org/x/net/trace +# golang.org/x/sync v0.4.0 +## explicit; go 1.17 +golang.org/x/sync/semaphore # golang.org/x/sys v0.13.0 ## explicit; go 1.17 
golang.org/x/sys/cpu @@ -430,14 +466,72 @@ golang.org/x/tools/internal/typesinternal ## explicit; go 1.11 golang.org/x/xerrors golang.org/x/xerrors/internal +# google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 +## explicit; go 1.19 +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.51.0 +## explicit; go 1.17 +google.golang.org/grpc +google.golang.org/grpc/attributes +google.golang.org/grpc/backoff +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz +google.golang.org/grpc/codes +google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/insecure +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/health/grpc_health_v1 +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch +google.golang.org/grpc/internal/balancerload +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/buffer +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/credentials +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpclog +google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/resolver +google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/resolver/unix +google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/status +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/transport +google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata +google.golang.org/grpc/peer +google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1alpha +google.golang.org/grpc/resolver +google.golang.org/grpc/serviceconfig +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap # google.golang.org/protobuf v1.31.0 ## explicit; go 1.11 +google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text @@ -459,8 +553,18 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/dynamicpb google.golang.org/protobuf/types/known/anypb +google.golang.org/protobuf/types/known/apipb +google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/emptypb +google.golang.org/protobuf/types/known/fieldmaskpb +google.golang.org/protobuf/types/known/sourcecontextpb 
+google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb +google.golang.org/protobuf/types/known/typepb +google.golang.org/protobuf/types/known/wrapperspb +google.golang.org/protobuf/types/pluginpb # gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17 ## explicit; go 1.12 gopkg.in/CodapeWild/dd-trace-go.v1/ddtrace
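

The plugin.pb.go header comment above describes the protoc plugin contract: protoc writes an encoded CodeGeneratorRequest to the plugin's stdin, the plugin writes an encoded CodeGeneratorResponse to stdout, and the executable must be named "protoc-gen-$NAME" so that "--${NAME}_out" finds it. A minimal sketch of that contract using the pluginpb package vendored in this change; the plugin name protoc-gen-demo and the ".demo.txt" output suffix are made up for illustration:

package main

import (
	"fmt"
	"io"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	// protoc writes an encoded CodeGeneratorRequest to the plugin's stdin.
	in, err := io.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	req := &pluginpb.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Advertise proto3 optional support via the supported_features bitmask
	// (a bitwise "or" of Feature values, per the comment in the diff above).
	resp := &pluginpb.CodeGeneratorResponse{
		SupportedFeatures: proto.Uint64(uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL)),
	}

	// Emit one output file per .proto file explicitly listed on the
	// command line; names are relative to the output directory.
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".demo.txt"),
			Content: proto.String("generated from " + name + "\n"),
		})
	}

	// The plugin writes an encoded CodeGeneratorResponse to stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	os.Stdout.Write(out)
}

Per the CodeGeneratorResponse comment above, errors in the input .proto files would be reported through the response's Error field while still exiting with status zero; the name/insertion_point pair on CodeGeneratorResponse_File is what lets a later generator in the same protoc invocation splice code into a file produced by an earlier one.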
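
The wrappers.pb.go hunk above shows why these well-known types exist: in proto3 a plain scalar field cannot distinguish "unset" from its zero value, whereas a pointer to a wrapper message can be nil. A small sketch using only the wrapperspb package and the nil-safe GetValue accessor vendored in this change:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var unset *wrapperspb.BytesValue  // nil: the field is absent
	set := wrapperspb.Bytes([]byte{}) // present, but holding an empty slice

	// GetValue checks x != nil (as in the generated code above),
	// so calling it on the nil pointer is safe.
	fmt.Println(unset.GetValue(), set.GetValue()) // [] []
	fmt.Println(unset == nil, set == nil)         // true false
}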
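
Both generated files above use the same rawDescOnce/rawDescGZIP idiom: the raw descriptor bytes are compressed lazily, at most once, and the compressed result replaces the raw slice for all later callers. A standalone sketch of that memoization pattern, with made-up data standing in for the descriptor:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	once sync.Once
	data = []byte("raw descriptor bytes")
)

// compressed gzips data exactly once, on first use, then returns the cached
// result; subsequent calls never pay the compression cost again.
func compressed() []byte {
	once.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		zw.Write(data) // error handling elided in this sketch
		zw.Close()
		data = buf.Bytes()
	})
	return data
}

func main() {
	fmt.Println(len(compressed()), len(compressed())) // same cached length twice
}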